# TF1-style graph code; the compat.v1 import keeps it runnable under
# TensorFlow 2 as well (on plain TF1, `import tensorflow as tf` suffices).
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np

def get_one_hot(targets, nb_classes):
    # Index rows of the identity matrix, then restore the input shape
    # with a trailing class dimension.
    res = np.eye(nb_classes)[np.array(targets).reshape(-1)]
    return res.reshape(list(targets.shape) + [nb_classes])
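# Quick sanity check (not in the original): one-hot of [0, 2, 1] over 3 classes
# gives one row per target with a 1 in the target's column.
assert np.array_equal(get_one_hot(np.array([0, 2, 1]), 3),
                      np.array([[1., 0., 0.], [0., 0., 1.], [0., 1., 0.]]))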
data_slice = 5  # number of folds for the train/validation split

def load():
    # wine.data (UCI Wine): 178 rows; column 0 is the class label (1-3),
    # columns 1-13 are the 13 features.
    raw_data = np.loadtxt('wine.data', delimiter=',')
    np.random.shuffle(raw_data)
    # Partition into data_slice folds, then split each fold into (label, features).
    data = np.array_split(raw_data, data_slice)
    return [np.split(fold, [1], axis=1) for fold in data]

data_pieces = load()
val_idx = 0  # index of the fold held out for validation
train_folds = [i for i in range(data_slice) if i != val_idx]

train_label = np.concatenate([data_pieces[i][0] for i in train_folds])
train_label = get_one_hot((train_label - 1).flatten().astype(int), 3)  # classes 1-3 -> one-hot over 0-2
train_data = np.concatenate([data_pieces[i][1] for i in train_folds])

val_label, val_data = data_pieces[val_idx]
val_label = get_one_hot((val_label - 1).flatten().astype(int), 3)
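# Added sketch (assumption, not part of the original pipeline): the 13 features
# sit on very different scales (e.g. proline runs into the hundreds while
# several others are below 1), which makes a sigmoid MLP with random-normal
# weights slow to train. Standardize with training-set statistics; drop these
# three lines to reproduce the original behavior.
feat_mean = train_data.mean(axis=0)
feat_std = train_data.std(axis=0)
train_data, val_data = (train_data - feat_mean) / feat_std, (val_data - feat_mean) / feat_std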
# learning params
learning_rate = 0.001
training_epochs = 200
batch_size = 4
# network params
n_hidden_1 = 256
n_hidden_2 = 128
n_input = 13   # 13 wine features
n_classes = 3  # 3 wine cultivars
# model inputs
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
def mlp(_X, _weights, _biases):
    # Two sigmoid hidden layers; the output layer returns raw logits.
    layer1 = tf.nn.sigmoid(tf.add(tf.matmul(_X, _weights['h1']), _biases['b1']))
    layer2 = tf.nn.sigmoid(tf.add(tf.matmul(layer1, _weights['h2']), _biases['b2']))
    return tf.matmul(layer2, _weights['out']) + _biases['out']
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
pred = mlp(x, weights, biases)
# Softmax is folded into the loss, so pred stays raw logits.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=pred))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
init = tf.global_variables_initializer()
# Pre-split fixed mini-batches, reused in the same order every epoch; data and
# labels stay aligned because load() shuffled rows before the label/feature split.
train_data_batches = np.array_split(train_data, len(train_data) // batch_size)
train_label_batches = np.array_split(train_label, len(train_label) // batch_size)
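# Optional helper (assumption, not in the original): re-shuffling batches each
# epoch is a common variant. Swapping the inner loop below for
# `for i, j in shuffled_batches(train_data, train_label, batch_size):` uses it.
def shuffled_batches(data, labels, size):
    perm = np.random.permutation(len(data))  # fresh aligned order per call
    n = len(data) // size
    return zip(np.array_split(data[perm], n), np.array_split(labels[perm], n))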
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
with tf.Session() as sess:
    sess.run(init)
    # Training cycle
    for epoch in range(training_epochs):
        for i, j in zip(train_data_batches, train_label_batches):
            sess.run(optimizer, feed_dict={x: i, y: j})
    print("Train Accuracy:", accuracy.eval({x: train_data, y: train_label}))
    print("Val Accuracy:", accuracy.eval({x: val_data, y: val_label}))