diff --git a/mnist_cnn_train.py b/mnist_cnn_train.py
index 5415209..9bb62aa 100644
--- a/mnist_cnn_train.py
+++ b/mnist_cnn_train.py
@@ -49,7 +49,7 @@ def train():
     loss = slim.losses.softmax_cross_entropy(y,y_)
 
     # Create a summary to monitor loss tensor
-    tf.scalar_summary('loss', loss)
+    tf.summary.scalar('loss', loss)
 
     # Define optimizer
     with tf.name_scope("ADAM"):
@@ -67,7 +67,7 @@ def train():
     train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss,global_step=batch)
 
     # Create a summary to monitor learning_rate tensor
-    tf.scalar_summary('learning_rate', learning_rate)
+    tf.summary.scalar('learning_rate', learning_rate)
 
     # Get accuracy of model
     with tf.name_scope("ACC"):
@@ -75,10 +75,10 @@ def train():
     accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
 
     # Create a summary to monitor accuracy tensor
-    tf.scalar_summary('acc', accuracy)
+    tf.summary.scalar('acc', accuracy)
 
     # Merge all summaries into a single op
-    merged_summary_op = tf.merge_all_summaries()
+    merged_summary_op = tf.summary.merge_all()
 
     # Add ops to save and restore all the variables
     saver = tf.train.Saver()
@@ -89,7 +89,7 @@ def train():
     total_batch = int(train_size / batch_size)
 
     # op to write logs to Tensorboard
-    summary_writer = tf.train.SummaryWriter(LOGS_DIRECTORY, graph=tf.get_default_graph())
+    summary_writer = tf.summary.FileWriter(LOGS_DIRECTORY, graph=tf.get_default_graph())
 
     # Save the maximum accuracy value for validation data
     max_acc = 0.
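
For context, the diff ports the pre-1.0 summary ops (tf.scalar_summary, tf.merge_all_summaries, tf.train.SummaryWriter) to their TensorFlow 1.x replacements under tf.summary. The sketch below shows the migrated pipeline end to end; it is a minimal illustration, not the repository's actual code: the toy graph, the placeholder tensors, and the 'logs/train' value for LOGS_DIRECTORY are assumptions standing in for the real loss/accuracy ops in mnist_cnn_train.py.

import tensorflow as tf

LOGS_DIRECTORY = 'logs/train'  # assumption: any writable log directory

# Toy stand-ins for the real loss/accuracy tensors built in train()
x = tf.placeholder(tf.float32, shape=[None, 1], name='x')
loss = tf.reduce_mean(tf.square(x))
accuracy = tf.reduce_mean(tf.sign(x))

# tf.summary.scalar replaces the deprecated tf.scalar_summary
tf.summary.scalar('loss', loss)
tf.summary.scalar('acc', accuracy)

# tf.summary.merge_all replaces tf.merge_all_summaries
merged_summary_op = tf.summary.merge_all()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # tf.summary.FileWriter replaces tf.train.SummaryWriter
    summary_writer = tf.summary.FileWriter(LOGS_DIRECTORY,
                                           graph=tf.get_default_graph())
    for step in range(3):
        # Evaluate the merged summary op and stream it to TensorBoard
        summary_str = sess.run(merged_summary_op,
                               feed_dict={x: [[1.0], [2.0]]})
        summary_writer.add_summary(summary_str, step)
    summary_writer.close()

The calling pattern (run the merged op each step, then add_summary with the global step) is unchanged by the migration; only the module paths move under tf.summary.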