# hwRecProto1.py -- MNIST handwritten-digit recognition prototype (2.9 KB)
  1. ########################################################################################
  2. # Author: Thomas Flucke
  3. # Date: 2017-05-13
  4. # Abreviations:
  5. # vect = Vector
  6. # ANN = Artifical Neural Network
  7. # corr = Correct version
  8. ########################################################################################
  9. # Download the MNIST dataset
  10. print "Importing MNIST parsing libraries..."
  11. from tensorflow.examples.tutorials.mnist import input_data
  12. print "done"
  13. print "Downloading MNIST dataset..."
  14. mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
  15. print "done"
  16. # mnist.train = 55 000 train points
  17. # mnist.test = 1 000 test points
  18. # mnist.validation = 5 000 ? points
  19. # Inputs = array[784] : Each index = 1px
  20. # Outputs = array[10] : Each index = corresponding digit
  21. ########################################################################################
  22. # Import tensorflow library and define AAN
  23. import tensorflow as tf
  24. LEARNING_CONST = 0.5
  25. # Create a tensorflow placeholder for an array [nx784] float32's (a.k.a. n MNIST vectors)
  26. inVect = tf.placeholder(tf.float32, [None, 784])
  27. # Initalize the ANN with zero's
  28. # Define tensorflow variable for the weight matrix [784x10] so we can matrix multiply
  29. weights = tf.Variable(tf.zeros([784, 10]))
  30. # Define tensorflow variable for the bias vector
  31. biases = tf.Variable(tf.zeros([10]))
  32. # Define formula for calculating output [nx10]
  33. outVect = tf.nn.softmax(tf.matmul(inVect, weights) + biases)
  34. # Create a tensorflow placeholder for the correct answer vector
  35. outVectCorr = tf.placeholder(tf.float32, [None, 10])
  36. # Calculate how incorrect the solutions arrive were
  37. crossEntropy = tf.reduce_mean(
  38. -tf.reduce_sum(
  39. outVectCorr * tf.log(outVect),
  40. # Tells reduce_sum to use the 10-length array, and not the n-length
  41. reduction_indices=[1]
  42. )
  43. )
  44. trainStep = tf.train.GradientDescentOptimizer(LEARNING_CONST).minimize(crossEntropy)
  45. ########################################################################################
  46. # Define accuracy checking conditions
  47. # Define formula for determining correctness
  48. # Highest value in outVect 1st index == highest value in correct outVect 1st index
  49. predictionCorr = tf.equal(tf.argmax(outVect, 1), tf.argmax(outVectCorr, 1))
  50. # Calculate how accurate the system was
  51. accuracy = tf.reduce_mean(tf.cast(predictionCorr, tf.float32))
  52. ########################################################################################
  53. # Run the system
  54. # Create interactive session
  55. sess = tf.InteractiveSession()
  56. # Initialize variables
  57. tf.global_variables_initializer().run()
  58. for _ in range(1000) :
  59. # Get 100 random digits from training set
  60. batchIns, batchOuts = mnist.train.next_batch(100)
  61. # Run the training step in the interactive session with the given inputs/outputs
  62. sess.run(trainStep, feed_dict={inVect: batchIns, outVectCorr: batchOuts})
  63. # Check accuracy
  64. print(sess.run(accuracy, feed_dict={inVect: mnist.test.images, outVectCorr: mnist.test.labels}))