In the graph, I'd suggest moving keep_prob = tf.placeholder(tf.float32) outside of the model function to make it global:
with graph.as_default():
    ...
    x = tf.placeholder("float", [None, n_input])
    y = tf.placeholder("float", [None, n_classes])
    keep_prob = tf.placeholder(tf.float32)

    def model(x, weights_hidden, weights_out, biases_hidden, biases_out, keep_prob):
        # Hidden layer with ReLU activation
        layer_1 = tf.nn.relu(tf.add(tf.matmul(x, weights_hidden), biases_hidden))
        # Apply dropout to the hidden layer
        drop_out = tf.nn.dropout(layer_1, keep_prob)  # dropout applied here
        # Output layer with linear activation
        out_layer = tf.matmul(drop_out, weights_out) + biases_out
        return out_layer
    ...
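As an aside, tf.nn.dropout in these TensorFlow 1.x releases implements inverted dropout: the surviving activations are scaled up by 1/keep_prob at training time, which is why feeding 1.0 at inference needs no extra rescaling. A minimal sanity check, assuming the same 1.x API as above:

import tensorflow as tf
import numpy as np

# Sanity check of inverted dropout (TF 1.x API, matching the snippet above):
# surviving entries are scaled by 1/keep_prob, dropped entries become 0.
with tf.Graph().as_default(), tf.Session() as sess:
    acts = tf.constant(np.ones((1, 8), dtype=np.float32))
    dropped = tf.nn.dropout(acts, keep_prob=0.5)
    print(sess.run(dropped))  # each entry is either 0.0 or 2.0 (= 1.0 / 0.5)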
When running the session, feed the desired keep_prob value during training, and feed 1.0 to keep_prob during inference (validation and/or testing):
# Run the graph
with tf.Session(graph=graph) as sess:
    tf.global_variables_initializer().run()
    ...
    for epoch in range(training_epochs):
        ...
        for i in range(total_batch):
            batch_x = ...
            batch_y = ...
            # Run the optimization op (backprop) and the cost op (to get the loss value);
            # feed a value < 1.0 for keep_prob during training
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y, keep_prob: 0.5})
    ...
    # Feed 1.0 for keep_prob during testing
    print("Test data accuracy:", accuracy.eval({x: test_dataset, y: test_labels, keep_prob: 1.0}))
    print("Valid data accuracy:", accuracy.eval({x: valid_dataset, y: valid_labels, keep_prob: 1.0}))