# 2_11.py — two-hidden-layer MLP that learns 4-bit binary addition (TensorFlow 1.x)
import numpy as np
import tensorflow as tf
  3. # prepare dataset
  4. def raw_data():
  5. for i in range(2**4):
  6. for j in range(2**4):
  7. yield [[float(x) for x in '%04d%04d'%(int(bin(i)[2:]),int(bin(j)[2:]))], [float(x) for x in '%05d'%int(bin(i + j)[2:])]]
  8. raw = list(raw_data())
  9. rng = np.random.default_rng()
  10. rng.shuffle(raw)
  11. raw_len = len(raw)
  12. train_len = int(raw_len * 0.8)
  13. train_data, train_label = zip(*raw[:train_len])
  14. val_data, val_label = zip(*raw[train_len:])
# learning params
learning_rate = 0.1    # Adam step size (large; chosen for this toy task)
training_epochs = 200  # full passes over the training set
batch_size = 16        # target mini-batch size (see array_split below)
# network params
n_hidden_1 = 30  # units in first hidden layer
n_hidden_2 = 11  # units in second hidden layer
n_input = 8      # two 4-bit operands, one feature per bit
n_output = 5     # 5-bit sum (max 15 + 15 = 30 fits in 5 bits)
# model
# TF1-style feed placeholders: one row per example.
x = tf.placeholder("float", [None, n_input])   # features: [batch, 8]
y = tf.placeholder("float", [None, n_output])  # targets:  [batch, 5]
  27. def mlp(_X, _weights, _biases):
  28. layer1 = tf.nn.sigmoid(tf.add(tf.matmul(_X, _weights['h1']), _biases['b1']))
  29. layer2 = tf.nn.sigmoid(tf.add(tf.matmul(layer1, _weights['h2']), _biases['b2']))
  30. return tf.matmul(layer2, _weights['out']) + _biases['out']
  31. weights = {
  32. 'h1' : tf.Variable(tf.random_normal([n_input, n_hidden_1])),
  33. 'h2' : tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
  34. 'out' : tf.Variable(tf.random_normal([n_hidden_2, n_output]))
  35. }
  36. biases = {
  37. 'b1' : tf.Variable(tf.random_normal([n_hidden_1])),
  38. 'b2' : tf.Variable(tf.random_normal([n_hidden_2])),
  39. 'out' : tf.Variable(tf.random_normal([n_output]))
  40. }
  41. pred = mlp(x, weights, biases)
  42. cost = tf.reduce_mean(tf.square(pred - y))
  43. optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
  44. init = tf.global_variables_initializer()
  45. train_data_batches = np.array_split(train_data, len(train_data) // batch_size)
  46. train_label_batches = np.array_split(train_label, len(train_label) // batch_size)
  47. correct_prediction = tf.equal(y, tf.where(pred<0.5, x = tf.zeros_like(pred), y = tf.ones_like(pred)))
  48. accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
  49. # training
  50. with tf.Session() as sess:
  51. sess.run(init)
  52. for epoch in range(training_epochs):
  53. for i, j in zip(train_data_batches, train_label_batches):
  54. sess.run(optimizer, feed_dict={x: i, y: j})
  55. print("epoch:", epoch, "Train Accuracy:", accuracy.eval({x: train_data, y: train_label}), "Val Accuracy:", accuracy.eval({x: val_data, y: val_label}))