@@ -14,7 +14,7 @@ def conv_out_size_same(size, stride):
   return int(math.ceil(float(size) / float(stride)))
 
 class DCGAN(object):
-  def __init__(self, sess, input_height=108, input_width=108, is_crop=True,
+  def __init__(self, sess, input_height=108, input_width=108, crop=True,
          batch_size=64, sample_num=64, output_height=64, output_width=64,
          y_dim=None, z_dim=100, gf_dim=64, df_dim=64,
          gfc_dim=1024, dfc_dim=1024, c_dim=3, dataset_name='default',
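For context, `conv_out_size_same` gives the spatial size of a SAME-padded, strided convolution output; the generator uses it to walk the target resolution backwards through its stride-2 deconv stack. A minimal illustration (the `s_h*` names are just for this sketch):

```python
import math

def conv_out_size_same(size, stride):
    # Output size of a SAME-padded convolution with the given stride.
    return int(math.ceil(float(size) / float(stride)))

# Walking a 64x64 target back through four stride-2 layers:
s_h = 64
s_h2 = conv_out_size_same(s_h, 2)    # 32
s_h4 = conv_out_size_same(s_h2, 2)   # 16
s_h8 = conv_out_size_same(s_h4, 2)   # 8
s_h16 = conv_out_size_same(s_h8, 2)  # 4
```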
@@ -33,7 +33,7 @@ def __init__(self, sess, input_height=108, input_width=108, is_crop=True,
       c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [3]
     """
     self.sess = sess
-    self.is_crop = is_crop
+    self.crop = crop
 
     self.batch_size = batch_size
     self.sample_num = sample_num
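The `is_crop` → `crop` rename is cosmetic, but it changes a public constructor keyword, so existing callers break. A hypothetical call site after this change (argument values are illustrative; every keyword shown appears in the new signature above):

```python
import tensorflow as tf

with tf.Session() as sess:
    # 'celebA' is a placeholder dataset name for this sketch.
    dcgan = DCGAN(sess, input_height=108, input_width=108, crop=True,
                  batch_size=64, output_height=64, output_width=64,
                  dataset_name='celebA')
```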
@@ -52,7 +52,6 @@ def __init__(self, sess, input_height=108, input_width=108, is_crop=True,
     self.gfc_dim = gfc_dim
     self.dfc_dim = dfc_dim
 
-
     # batch normalization: deals with poor initialization, helps gradient flow
     self.d_bn1 = batch_norm(name='d_bn1')
     self.d_bn2 = batch_norm(name='d_bn2')
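The `batch_norm` objects constructed here come from the repo's ops module, which is not part of this diff. In TF 1.x such a helper is typically a thin class wrapper over `tf.contrib.layers.batch_norm` so each layer gets its own named variable scope; a sketch under that assumption (parameter values assumed, not taken from this diff):

```python
import tensorflow as tf

class batch_norm(object):
    """Per-layer batch norm with its own named scope."""
    def __init__(self, epsilon=1e-5, momentum=0.9, name="batch_norm"):
        self.epsilon = epsilon
        self.momentum = momentum
        self.name = name

    def __call__(self, x, train=True):
        # Delegates to the TF 1.x contrib op; `train` toggles the
        # moving-average vs. batch-statistics behavior.
        return tf.contrib.layers.batch_norm(x,
                                            decay=self.momentum,
                                            updates_collections=None,
                                            epsilon=self.epsilon,
                                            scale=True,
                                            is_training=train,
                                            scope=self.name)
```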
@@ -76,17 +75,17 @@ def __init__(self, sess, input_height=108, input_width=108, is_crop=True,
       self.c_dim = self.data_X[0].shape[-1]
     else:
       self.data = glob(os.path.join("./data", self.dataset_name, self.input_fname_pattern))
-      self.c_dim = self.data[0].shape[-1]
+      self.c_dim = imread(self.data[0]).shape[-1]
 
-    self.is_grayscale = (self.c_dim == 1)
+    self.grayscale = (self.c_dim == 1)
 
     self.build_model()
 
   def build_model(self):
     if self.y_dim:
       self.y = tf.placeholder(tf.float32, [self.batch_size, self.y_dim], name='y')
 
-    if self.is_crop:
+    if self.crop:
       image_dims = [self.output_height, self.output_width, self.c_dim]
     else:
       image_dims = [self.input_height, self.input_width, self.c_dim]
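The `c_dim` change is a real bug fix, not a rename: `glob` returns file paths, i.e. strings, which have no `.shape`, so the old line raised `AttributeError` on any non-built-in dataset. Decoding one sample yields the channel count. A self-contained illustration, where `imread` stands in for the utils helper the new line assumes:

```python
import os
from glob import glob
import numpy as np
import scipy.misc

def imread(path):
    # Stand-in for utils.imread; returns an H x W x C (or H x W) array.
    return scipy.misc.imread(path).astype(np.float32)

data = glob(os.path.join("./data", "celebA", "*.jpg"))
c_dim = imread(data[0]).shape[-1]  # 3 for RGB input; grayscale detection
                                   # relies on imread keeping a channel axis
grayscale = (c_dim == 1)
```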
@@ -179,9 +178,9 @@ def train(self, config):
                   input_width=self.input_width,
                   resize_height=self.output_height,
                   resize_width=self.output_width,
-                  is_crop=self.is_crop,
-                  is_grayscale=self.is_grayscale) for sample_file in sample_files]
-    if (self.is_grayscale):
+                  crop=self.crop,
+                  grayscale=self.grayscale) for sample_file in sample_files]
+    if (self.grayscale):
       sample_inputs = np.array(sample).astype(np.float32)[:, :, :, None]
     else:
       sample_inputs = np.array(sample).astype(np.float32)
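This call and the one in the next hunk forward the renamed keywords to the `get_image` helper in utils.py, so that function's signature has to change in lockstep even though it is not shown in this diff. A sketch of what the helper is assumed to look like after the rename (implementation simplified; the crop/resize/normalization details are assumptions, only the keyword names come from the diff):

```python
import numpy as np
import scipy.misc

def get_image(image_path, input_height, input_width,
              resize_height=64, resize_width=64,
              crop=True, grayscale=False):
    # Read one image, optionally center-crop it to the input size,
    # then resize to the network resolution and scale to [-1, 1].
    image = scipy.misc.imread(image_path, flatten=grayscale).astype(np.float32)
    if crop:
        h, w = image.shape[:2]
        j = int(round((h - input_height) / 2.))
        i = int(round((w - input_width) / 2.))
        image = image[j:j + input_height, i:i + input_width]
    image = scipy.misc.imresize(image, [resize_height, resize_width])
    return np.array(image) / 127.5 - 1.
```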
@@ -215,9 +214,9 @@ def train(self, config):
                   input_width=self.input_width,
                   resize_height=self.output_height,
                   resize_width=self.output_width,
-                  is_crop=self.is_crop,
-                  is_grayscale=self.is_grayscale) for batch_file in batch_files]
-      if (self.is_grayscale):
+                  crop=self.crop,
+                  grayscale=self.grayscale) for batch_file in batch_files]
+      if self.grayscale:
         batch_images = np.array(batch).astype(np.float32)[:, :, :, None]
       else:
         batch_images = np.array(batch).astype(np.float32)
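In both branches the grayscale path appends a channel axis with `[:, :, :, None]`, because grayscale images decode to `(H, W)` arrays while the input placeholders expect `(N, H, W, 1)`. A quick check of the indexing trick with dummy data:

```python
import numpy as np

# Eight dummy 64x64 grayscale images stacked into a batch.
batch = [np.zeros((64, 64), dtype=np.float32) for _ in range(8)]
batch_images = np.array(batch).astype(np.float32)[:, :, :, None]
assert batch_images.shape == (8, 64, 64, 1)
```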