|
@@ -0,0 +1,69 @@
|
|
|
+import tensorflow as tf
|
|
|
+import numpy as np
|
|
|
+
|
|
|
+
|
|
|
def raw_data():
    """Yield [input_bits, label_bits] pairs for the 4-bit binary-addition task.

    For every pair (i, j) with 0 <= i, j < 16 the input is the 8-bit
    concatenation of i and j (4 bits each, most-significant bit first) and
    the label is the 5-bit binary representation of i + j, every bit
    expressed as a float (0.0 or 1.0).

    Yields:
        list: ``[inputs, label]`` where ``inputs`` is a list of 8 floats
        and ``label`` is a list of 5 floats.
    """
    for i in range(2 ** 4):
        for j in range(2 ** 4):
            # format(v, '04b') zero-pads the binary string directly; the
            # original bin() -> int -> '%04d' round-trip produced the same
            # digits but only works while the bit string parses as a
            # decimal integer.
            inputs = [float(bit) for bit in format(i, '04b') + format(j, '04b')]
            label = [float(bit) for bit in format(i + j, '05b')]
            yield [inputs, label]
|
|
|
# Materialise the full 256-example dataset, shuffle it in place, and carve
# out an 80/20 train/validation split.
raw = list(raw_data())
rng = np.random.default_rng()
rng.shuffle(raw)
raw_len = len(raw)

train_len = int(raw_len * 0.8)
train_pairs, val_pairs = raw[:train_len], raw[train_len:]
train_data, train_label = zip(*train_pairs)
val_data, val_label = zip(*val_pairs)
|
|
|
+
|
|
|
+
|
|
|
# --- Training hyperparameters ---
# NOTE(review): 0.1 is a very aggressive learning rate for Adam — confirm
# that training actually converges rather than oscillating.
learning_rate = 0.1
training_epochs = 200
batch_size = 16

# --- Network architecture ---
# 8 input bits (two 4-bit operands), two sigmoid hidden layers,
# 5 output bits (the 5-bit sum produced by raw_data()).
n_hidden_1 = 30
n_hidden_2 = 11
n_input = 8
n_output = 5
|
|
|
+
|
|
|
+
|
|
|
# TF1-style graph inputs: x holds batches of 8-bit operand pairs,
# y the corresponding 5-bit sum labels (batch dimension left open).
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_output])
|
|
|
+
|
|
|
def mlp(_X, _weights, _biases):
    """Two-hidden-layer perceptron: sigmoid hidden layers, linear output.

    Args:
        _X: input tensor of shape [batch, n_input].
        _weights: dict with 'h1', 'h2', 'out' weight matrices.
        _biases: dict with 'b1', 'b2', 'out' bias vectors.

    Returns:
        Linear (un-squashed) output tensor of shape [batch, n_output].
    """
    hidden1 = tf.nn.sigmoid(tf.matmul(_X, _weights['h1']) + _biases['b1'])
    hidden2 = tf.nn.sigmoid(tf.matmul(hidden1, _weights['h2']) + _biases['b2'])
    return tf.add(tf.matmul(hidden2, _weights['out']), _biases['out'])
|
|
|
+
|
|
|
# Weight matrices for each layer, initialised from a standard normal
# distribution (TF1 tf.random_normal API).
weights = {
    'h1' : tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2' : tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out' : tf.Variable(tf.random_normal([n_hidden_2, n_output]))
}
|
|
|
+
|
|
|
# Bias vectors for each layer, also drawn from a standard normal —
# matches the initialisation style of the weight matrices above.
biases = {
    'b1' : tf.Variable(tf.random_normal([n_hidden_1])),
    'b2' : tf.Variable(tf.random_normal([n_hidden_2])),
    'out' : tf.Variable(tf.random_normal([n_output]))
}
|
|
|
+
|
|
|
# Assemble the training graph: forward pass, mean-squared error against
# the 0/1 target bits, and an Adam optimiser step.
pred = mlp(x, weights, biases)
cost = tf.reduce_mean(tf.square(pred - y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Op that initialises all tf.Variables; run once at session start.
init = tf.global_variables_initializer()
|
|
|
+
|
|
|
# Pre-split the training set into roughly batch_size-sized minibatches.
# np.array_split keeps data and label batches aligned because both use
# the same number of splits.
train_data_batches = np.array_split(train_data, len(train_data) // batch_size)
train_label_batches = np.array_split(train_label, len(train_label) // batch_size)
|
|
|
+
|
|
|
# Threshold the (linear) network outputs at 0.5 and compare bitwise with
# the labels. Accuracy is therefore the fraction of correctly predicted
# individual BITS, not of whole 5-bit sums.
correct_prediction = tf.equal(y, tf.where(pred<0.5, x = tf.zeros_like(pred), y = tf.ones_like(pred)))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
# Train: one optimiser step per minibatch per epoch, then report bitwise
# accuracy on the train and validation sets after every epoch.
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        # i / j are a data batch and its matching label batch.
        for i, j in zip(train_data_batches, train_label_batches):
            sess.run(optimizer, feed_dict={x: i, y: j})
        print("epoch:", epoch, "Train Accuracy:", accuracy.eval({x: train_data, y: train_label}), "Val Accuracy:", accuracy.eval({x: val_data, y: val_label}))
|