# mlp_tf.py: a two-hidden-layer MLP classifier for the UCI Wine dataset (TensorFlow 1.x)

import tensorflow as tf
import numpy as np
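
# NOTE: this script uses the TF 1.x graph API (tf.placeholder, tf.Session).
# Under TensorFlow 2.x it can still be run via:
#   import tensorflow.compat.v1 as tf
#   tf.disable_v2_behavior()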

def get_one_hot(targets, nb_classes):
    """One-hot encode an integer label array, preserving its leading shape."""
    res = np.eye(nb_classes)[np.array(targets).reshape(-1)]
    return res.reshape(list(targets.shape) + [nb_classes])
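
# For example (a quick sanity check of the helper above):
#   get_one_hot(np.array([0, 2]), 3) -> [[1., 0., 0.], [0., 0., 1.]]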

data_slice = 5  # number of folds the shuffled dataset is split into

def load():
    # wine.data (UCI Wine): column 0 is the class label (1-3),
    # columns 1-13 are the numeric features
    raw_data = np.loadtxt('wine.data', delimiter=',')
    np.random.shuffle(raw_data)
    data = np.array_split(raw_data, data_slice)
    # split each fold into a (labels, features) pair along the column axis
    return [np.split(piece, [1], axis=1) for piece in data]
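
# NOTE: np.random.shuffle is unseeded here, so the fold assignment (and the
# accuracies printed below) vary from run to run; calling np.random.seed(...)
# before load() would make the split reproducible.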

data_pieces = load()
val_idx = 0  # index of the fold held out for validation

# every fold except val_idx contributes to the training set
train_folds = [i for i in range(data_slice) if i != val_idx]
train_label = np.concatenate([data_pieces[i][0] for i in train_folds])
train_label = (train_label - 1).flatten().astype(int)  # map classes 1-3 to 0-2
train_label = get_one_hot(train_label, 3)
train_data = np.concatenate([data_pieces[i][1] for i in train_folds])

# the held-out fold is the validation set
val_label = data_pieces[val_idx][0]
val_data = data_pieces[val_idx][1]
val_label = (val_label - 1).flatten().astype(int)
val_label = get_one_hot(val_label, 3)
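
# With the standard 178-sample wine.data, np.array_split yields folds of
# 36/36/36/35/35 rows, so val_idx = 0 leaves 142 training and 36 validation rows.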

# learning params
learning_rate = 0.001
training_epochs = 200
batch_size = 4

# network params
n_hidden_1 = 256
n_hidden_2 = 128
n_input = 13   # 13 features per sample
n_classes = 3  # 3 wine cultivars

# model
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])

def mlp(_X, _weights, _biases):
    """Forward pass: n_input -> n_hidden_1 -> n_hidden_2 -> n_classes."""
    layer1 = tf.nn.sigmoid(tf.add(tf.matmul(_X, _weights['h1']), _biases['b1']))
    layer2 = tf.nn.sigmoid(tf.add(tf.matmul(layer1, _weights['h2']), _biases['b2']))
    return tf.matmul(layer2, _weights['out']) + _biases['out']  # raw logits, no softmax

weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
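
# NOTE: tf.random_normal defaults to stddev=1.0, which can saturate the sigmoid
# units; passing e.g. stddev=0.1 (or using a Xavier initializer) is a common
# tweak if training stalls.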

pred = mlp(x, weights, biases)
# softmax_cross_entropy_with_logits_v2 applies the softmax itself, so it is fed raw logits
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=pred))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
init = tf.global_variables_initializer()

# pre-split the training set into mini-batches of roughly batch_size rows
train_data_batches = np.array_split(train_data, len(train_data) // batch_size)
train_label_batches = np.array_split(train_label, len(train_label) // batch_size)

correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))

with tf.Session() as sess:
    sess.run(init)
    # training cycle
    for epoch in range(training_epochs):
        for batch_x, batch_y in zip(train_data_batches, train_label_batches):
            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print("Train Accuracy:", accuracy.eval({x: train_data, y: train_label}))
    print("Val Accuracy:", accuracy.eval({x: val_data, y: val_label}))
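
# NOTE: the features are fed in raw, and the wine attributes span very
# different ranges (proline is in the hundreds), so standardizing each feature
# to zero mean / unit variance is a likely first improvement here.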