Bug fixes and minor refactoring #2
base: master
Changes from all commits: 8e86154, 893d4aa, f4ff010, 0d0904d, b6ae495, 718ed05
```diff
@@ -1,6 +1,6 @@
 ###
 ### Dockerfile used for augmentation pipeline code
 ###
-FROM gcr.io/tensorflow/tensorflow:latest-gpu
+FROM tensorflow/tensorflow:1.15.2-gpu
 MAINTAINER Vincent Vanhoucke <vanhoucke@google.com>
-RUN pip install scikit-learn scikit-image opencv-python==3.2.0.8
+RUN pip install scipy==1.1.0 scikit-learn scikit-image opencv-python==3.2.0.8
```
Author: scipy 1.2.0+ removes `scipy.misc.imread` and probably other functions needed by SEA. It was faster to install an older version than to refactor the code for newer scipy.
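For context, a minimal sketch of the incompatibility the `scipy==1.1.0` pin works around (the file path is a placeholder, not something from the SEA code):

```python
# Minimal check of the incompatibility the scipy==1.1.0 pin works around.
# "example.png" is a placeholder path, not a file from this repository.
import scipy
import scipy.misc

print(scipy.__version__)

if hasattr(scipy.misc, "imread"):
    # Present on scipy <= 1.1.x (needs Pillow installed).
    img = scipy.misc.imread("example.png")
else:
    # Removed in newer scipy; imageio.imread is the usual replacement,
    # which would have required refactoring the code instead of pinning.
    import imageio
    img = imageio.imread("example.png")

print(img.shape)
```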
@@ -215,41 +215,43 @@ def return_bayer(bayer_type, im_h, im_w, batchsize): | |
| # | ||
| # generate the CFA arrays for R,G,B based upon the r pixel location: | ||
| # | ||
| h = int(im_h / 2) | ||
|
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Leaving h and w as floats causes a crash in np.tile |
||
| w = int(im_w / 2) | ||
| if bayer_type=='BGGR': | ||
| # bggr | ||
| Cr=np.array([[1,0],[0,0]]) | ||
| Cg=np.array([[0,1],[1,0]]) | ||
| Cb=np.array([[0,0],[0,1]]) | ||
| Rcfa= np.tile( Cr, (im_h/2,im_w/2)) | ||
| Gcfa= np.tile( Cg, (im_h/2,im_w/2)) | ||
| Bcfa= np.tile( Cb, (im_h/2,im_w/2)) | ||
| Rcfa= np.tile( Cr, (h, w)) | ||
| Gcfa= np.tile( Cg, (h, w)) | ||
| Bcfa= np.tile( Cb, (h, w)) | ||
| # | ||
| if bayer_type=='GBRG': | ||
| ## gbrg | ||
| Cr2=np.array([[0,1],[0,0]]) | ||
| Cg2=np.array([[1,0],[0,1]]) | ||
| Cb2=np.array([[0,0],[1,0]]) | ||
| Rcfa= np.tile( Cr2, (im_h/2,im_w/2)) | ||
| Gcfa= np.tile( Cg2, (im_h/2,im_w/2)) | ||
| Bcfa= np.tile( Cb2, (im_h/2,im_w/2)) | ||
| Rcfa= np.tile( Cr2, (h, w)) | ||
| Gcfa= np.tile( Cg2, (h, w)) | ||
| Bcfa= np.tile( Cb2, (h, w)) | ||
| # | ||
| if bayer_type=='GRBG': | ||
| ## grbg | ||
| Cr3=np.array([[0,0],[1,0]]) | ||
| Cg3=np.array([[1,0],[0,1]]) | ||
| Cb3=np.array([[0,1],[0,0]]) | ||
| Rcfa= np.tile( Cr3, (im_h/2,im_w/2)) | ||
| Gcfa= np.tile( Cg3, (im_h/2,im_w/2)) | ||
| Bcfa= np.tile( Cb3, (im_h/2,im_w/2)) | ||
| Rcfa= np.tile( Cr3, (h, w)) | ||
| Gcfa= np.tile( Cg3, (h, w)) | ||
| Bcfa= np.tile( Cb3, (h, w)) | ||
| # | ||
| if bayer_type=='RGGB': | ||
| ## rggb | ||
| Cr4=np.array([[0,0],[0,1]]) | ||
| Cg4=np.array([[0,1],[1,0]]) | ||
| Cb4=np.array([[1,0],[0,0]]) | ||
| Rcfa= np.tile( Cr4, (im_h/2,im_w/2)) | ||
| Gcfa= np.tile( Cg4, (im_h/2,im_w/2)) | ||
| Bcfa= np.tile( Cb4, (im_h/2,im_w/2)) | ||
| Rcfa= np.tile( Cr4, (h, w)) | ||
| Gcfa= np.tile( Cg4, (h, w)) | ||
| Bcfa= np.tile( Cb4, (h, w)) | ||
| # | ||
| Rcfa= np.tile( Rcfa, (batchsize,1,1)) | ||
| Gcfa= np.tile( Gcfa, (batchsize,1,1)) | ||
|
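To see why the cast matters, here is a small standalone sketch (illustrative sizes, not from the repository): under Python 3, `im_h / 2` is a float, and `np.tile` rejects float repetition counts.

```python
import numpy as np

im_h, im_w = 6, 8
Cr = np.array([[1, 0], [0, 0]])  # red-site mask for one 2x2 Bayer cell

# np.tile(Cr, (im_h / 2, im_w / 2)) raises a TypeError with recent NumPy,
# because Python 3's "/" yields floats and repetition counts must be integers.

h, w = int(im_h / 2), int(im_w / 2)  # the cast added in this PR
Rcfa = np.tile(Cr, (h, w))
print(Rcfa.shape)  # (6, 8)
```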
```diff
@@ -1,38 +1,35 @@
 import os
 import scipy.misc
 import numpy as np
 #os.environ["CUDA_VISIBLE_DEVICES"]="1"
 from model import camGAN
 from utils import pp
 import tensorflow as tf
+import argparse
 
-flags = tf.app.flags
-flags.DEFINE_integer("epoch", 1, "number of epochs; corresponds to number of augmentations to perform on the dataset (i.e., epoch =2 means the dataset will be augmented twice")
-flags.DEFINE_integer("batch_size", 2, "The size of batch images; must be a multiple of n and >1")
-flags.DEFINE_integer("c_dim", 3, "Dimension of image color. [3]")
-#
-flags.DEFINE_string("Img_dataset","generator_images","The name (full path) of dataset to augment")
-flags.DEFINE_integer("Img_height",512, "The size of the output images to produce [64]")
-flags.DEFINE_integer("Img_width", 1024, "The size of the output images to produce. If None, same value as output_height [None]")
-flags.DEFINE_boolean("chromab_flag", True, "flag that specifies whether to perform Chromatic aberration augmentation")
-flags.DEFINE_boolean("blur_flag", True, "flag that specifies whether to perform Blur augmentation")
-flags.DEFINE_boolean("exposure_flag", True, "flag that specifies whether to perform Exposure augmentation")
-flags.DEFINE_boolean("noise_flag", True, "flag that specifies whether to perform noise augmentation")
-flags.DEFINE_boolean("color_flag", True, "flag that specifies whether to perform color shift augmentation")
-flags.DEFINE_boolean("save_aug_params_flag", False, "flag that specifies whether to save aug. parameters for each image")
-#
-flags.DEFINE_string("input_fname_pattern", "*.png", "Glob pattern of filename of input images [*]")
-flags.DEFINE_string("results_dir", "results", "Directory name to save the augmented images [results]")
-FLAGS = flags.FLAGS
+parser = argparse.ArgumentParser(description='Augment a dataset')
+parser.add_argument('-n', type=int, default=1, nargs='?', help='sets the number of augmentations to perform on the dataset i.e., setting n to 2 means the dataset will be augmented twice')
+parser.add_argument('-b', '--batch_size', type=int, default=64, nargs='?', help='size of batches; must be a multiple of n and >1')
+parser.add_argument('-c', '--channels', type=int, default=3, nargs='?', help='dimension of image color channel (note that any channel >3 will be discarded')
+parser.add_argument('-i', '--input', type=str, help='path to the dataset to augment')
+parser.add_argument('-o', '--output', type=str, default='results', nargs='?', help='path where the augmented dataset will be saved')
+parser.add_argument('--pattern', type=str, default="*.png", nargs='?', help='glob pattern of filename of input images')
+parser.add_argument('--image_height', type=int, default=512, nargs='?', help='size of the output images to produce (note that all images will be resized to the specified image_height x image_width)')
+parser.add_argument('--image_width', type=int, default=1024, nargs='?', help='size of the output images to produce. If None, same value as output_height')
+parser.add_argument('--chromatic_aberration', type=bool, default=False, nargs='?', help='perform chromatic aberration augmentation')
+parser.add_argument('--blur', type=bool, default=False, nargs='?', help='perform blur augmentation')
+parser.add_argument('--exposure', type=bool, default=False, nargs='?', help='perform exposure augmentation')
+parser.add_argument('--noise', type=bool, default=False, nargs='?', help='perform noise augmentation')
+parser.add_argument('--colour_shift', type=bool, default=False, nargs='?', help='perform colour shift augmentation')
+parser.add_argument('--save_params', type=bool, default=False, nargs='?', help='save augmentation parameters for each image')
+args = parser.parse_args()
```
Author (comment on lines -9 to +23): Replaced TensorFlow's deprecated `tf.app.flags` with the much more robust `argparse`, and cleaned up the arguments to hopefully be clearer.
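As a quick illustration of the new interface, here is a standalone sketch (not part of the PR; the script name and example values are assumed) showing how a typical invocation parses:

```python
# Standalone sketch of the argparse interface above, parsing an example
# command line explicitly so it runs without the rest of the code.
import argparse

parser = argparse.ArgumentParser(description='Augment a dataset')
parser.add_argument('-n', type=int, default=1, nargs='?',
                    help='number of augmentation passes over the dataset')
parser.add_argument('-i', '--input', type=str, help='path to the dataset to augment')
parser.add_argument('-o', '--output', type=str, default='results', nargs='?',
                    help='path where the augmented dataset will be saved')
parser.add_argument('--blur', type=bool, default=False, nargs='?',
                    help='perform blur augmentation')

# Equivalent to: python main.py -n 2 -i ./generator_images --blur True
# Note: with type=bool, any non-empty string (even "False") converts to True,
# so these switches effectively only turn features on.
args = parser.parse_args(['-n', '2', '-i', './generator_images', '--blur', 'True'])
print(args.n, args.input, args.output, args.blur)  # 2 ./generator_images results True
```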
```diff
 
 
 def main(_):
-    pp.pprint(flags.FLAGS.__flags)
+    print(args)
     ##
-    if FLAGS.Img_width is None:
-        FLAGS.Img_width = FLAGS.Img_height
+    if args.image_width is None:
+        args.image_width = args.image_height
     ##
-    if not os.path.exists(FLAGS.results_dir):
-        os.makedirs(FLAGS.results_dir)
+    if not os.path.exists(args.output):
+        os.makedirs(args.output)
     ##
     run_config = tf.ConfigProto()
     ## allocate only as much GPU memory based on runtime allocations
@@ -41,27 +38,27 @@ def main(_):
     with tf.Session(config=run_config) as sess:
         autoauggan = camGAN(
             sess,
-            Img_width=FLAGS.Img_width,
-            Img_height=FLAGS.Img_height,
-            batch_size=FLAGS.batch_size,
-            c_dim=FLAGS.c_dim,
-            Img_dataset_name = FLAGS.Img_dataset,
-            chromab_flag = FLAGS.chromab_flag,
-            blur_flag = FLAGS.blur_flag,
-            exposure_flag = FLAGS.exposure_flag,
-            noise_flag = FLAGS.noise_flag,
-            color_flag = FLAGS.color_flag,
-            save_aug_params_flag = FLAGS.save_aug_params_flag,
-            input_fname_pattern=FLAGS.input_fname_pattern,
-            results_dir = FLAGS.results_dir)
+            image_width=args.image_width,
+            image_height=args.image_height,
+            batch_size=args.batch_size,
+            channels=args.channels,
+            input = args.input,
+            chromatic_aberration = args.chromatic_aberration,
+            blur = args.blur,
+            exposure = args.exposure,
+            noise = args.noise,
+            colour_shift = args.colour_shift,
+            save_params = args.save_params,
+            pattern=args.pattern,
+            output = args.output)
 
-        #if FLAGS.is_train:
+        #if args.is_train:
         if True:
-            autoauggan.augment_batches(FLAGS)
+            autoauggan.augment_batches(args)
         else:
-            if not autoauggan.load(FLAGS.checkpoint_dir):
+            if not autoauggan.load(args.checkpoint_dir):
                 raise Exception("[!] Train a model first, then run test mode")
-            #wgan.test(FLAGS)
+            #wgan.test(args)
 
 
 if __name__ == '__main__':
     tf.app.run()
```
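The first hunk cuts off right after the `run_config` line, so the GPU-memory setting itself isn't shown; the standard TF1 idiom matching the "## allocate only as much GPU memory" comment would be something like the following (an assumption about the surrounding code, shown only for context):

```python
# Sketch of the usual TF1 pattern for growing GPU memory on demand,
# matching the comment above run_config; not copied from the repository.
import tensorflow as tf

run_config = tf.ConfigProto()
run_config.gpu_options.allow_growth = True  # allocate GPU memory as needed

with tf.Session(config=run_config) as sess:
    pass  # model construction and augmentation would happen here
```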
Comment (on the Dockerfile `FROM` change): gcr.io doesn't host TensorFlow Docker images anymore.