# mlp_tf.py — two-layer MLP classifier for the SPECT heart dataset (TensorFlow 1.x API).
  1. import tensorflow as tf
  2. import numpy as np
  3. def get_one_hot(targets, nb_classes):
  4. res = np.eye(nb_classes)[np.array(targets).reshape(-1)]
  5. return res.reshape(list(targets.shape)+[nb_classes])
  6. def load(file):
  7. raw_data = np.loadtxt(file, delimiter=',')
  8. data_size = len(raw_data)
  9. np.random.shuffle(raw_data)
  10. ret = np.split(raw_data, [1,], axis=1)
  11. return ret
  12. train_pieces = load('SPECT.train')
  13. train_label = train_pieces[0]
  14. train_data = train_pieces[1]
  15. # train_label = train_label - 1
  16. train_label = train_label.flatten().astype(int)
  17. train_label = get_one_hot(train_label, 2)
  18. val_pieces = load('SPECT.test')
  19. val_label = val_pieces[0]
  20. val_data = val_pieces[1]
  21. # val_label = val_label - 1
  22. val_label = val_label.flatten().astype(int)
  23. val_label2 = val_label.flatten().astype(int)
  24. val_label = get_one_hot(val_label, 2)
  25. # learning params
  26. learning_rate = 0.005
  27. training_epochs = 200
  28. batch_size = 16
  29. # network params
  30. n_hidden_1 = 32
  31. n_hidden_2 = 8
  32. n_input = 22
  33. n_classses = 2
  34. # model
  35. x = tf.placeholder("float", [None, n_input])
  36. y = tf.placeholder("float", [None, n_classses])
  37. def mlp(_X, _weights, _biases):
  38. layer1 = tf.nn.sigmoid(tf.add(tf.matmul(_X, _weights['h1']), _biases['b1']))
  39. layer2 = tf.nn.sigmoid(tf.add(tf.matmul(layer1, _weights['h2']), _biases['b2']))
  40. return tf.matmul(layer2, _weights['out']) + _biases['out']
  41. weights = {
  42. 'h1' : tf.Variable(tf.random_normal([n_input, n_hidden_1])),
  43. 'h2' : tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
  44. 'out' : tf.Variable(tf.random_normal([n_hidden_2, n_classses]))
  45. }
  46. biases = {
  47. 'b1' : tf.Variable(tf.random_normal([n_hidden_1])),
  48. 'b2' : tf.Variable(tf.random_normal([n_hidden_2])),
  49. 'out' : tf.Variable(tf.random_normal([n_classses]))
  50. }
  51. pred = mlp(x, weights, biases)
  52. cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=pred))
  53. optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
  54. init = tf.global_variables_initializer()
  55. train_data_batches = np.array_split(train_data, len(train_data) // batch_size)
  56. train_label_batches = np.array_split(train_label, len(train_label) // batch_size)
  57. correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
  58. pred_label = tf.argmax(pred, 1)
  59. with tf.Session() as sess:
  60. sess.run(init)
  61. #Training cycle
  62. for epoch in range(training_epochs):
  63. for i, j in zip(train_data_batches, train_label_batches):
  64. sess.run(optimizer, feed_dict={x: i, y: j})
  65. accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
  66. pl = pred_label.eval({x: val_data, y: val_label})
  67. TP = sum(pl[i] == 1 and val_label2[i] == 1 for i in range(len(val_label2)))
  68. FP = sum(pl[i] == 1 and val_label2[i] == 0 for i in range(len(val_label2)))
  69. FN = sum(pl[i] == 0 and val_label2[i] == 1 for i in range(len(val_label2)))
  70. P = TP / (TP + FP)
  71. R = TP / (TP + FN)
  72. print("Train Accuracy:", accuracy.eval({x: train_data, y: train_label}))
  73. print("Val Accuracy:", accuracy.eval({x: val_data, y: val_label}))
  74. print('F1', 2 * P * R / (P + R))