import numpy as np
import pandas as pd
import tensorflow as tf

import keras as K
from keras import callbacks

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
def load_data(CSV_FILE_PATH, target_var='class'):
    """Load a labelled CSV and return one-hot-encoded train/test splits.

    Parameters
    ----------
    CSV_FILE_PATH : str
        Path to a CSV file whose columns are the feature columns plus one
        categorical label column.
    target_var : str, optional
        Name of the label column (default ``'class'``, matching the iris
        dataset this script was written for).

    Returns
    -------
    train_x, test_x : pandas.DataFrame
        70/30 split of the feature columns.
    train_y, test_y : pandas.DataFrame
        Matching split of the one-hot label columns ('y0', 'y1', ...).
    Class_dict : dict
        Mapping from each original class label to its integer code.
    """
    IRIS = pd.read_csv(CSV_FILE_PATH)
    features = list(IRIS.columns)
    features.remove(target_var)

    # Assign each distinct class label an integer code in first-seen order.
    Class = IRIS[target_var].unique()
    Class_dict = dict(zip(Class, range(len(Class))))
    IRIS['target'] = IRIS[target_var].apply(lambda x: Class_dict[x])

    # One-hot encode the integer codes; one DataFrame column per dimension.
    lb = LabelBinarizer()
    lb.fit(list(Class_dict.values()))
    transformed_labels = lb.transform(IRIS['target'])
    y_bin_labels = []
    for i in range(transformed_labels.shape[1]):
        y_bin_labels.append('y' + str(i))
        IRIS['y' + str(i)] = transformed_labels[:, i]

    # train_size=0.7 alone implies the 0.3 test split (the redundant
    # test_size argument was dropped); random_state pins the shuffle.
    train_x, test_x, train_y, test_y = train_test_split(
        IRIS[features], IRIS[y_bin_labels],
        train_size=0.7, random_state=0)
    return train_x, test_x, train_y, test_y, Class_dict
if __name__ == '__main__':
    print("\nIris dataset using Keras")
    # Reproducibility: seed NumPy and TensorFlow's graph-level RNG
    # (tf.set_random_seed is the TF1-era API this script targets).
    np.random.seed(4)
    tf.set_random_seed(13)

    print("Loading Iris data into memory")
    CSV_FILE_PATH = 'iris.csv'
    train_x, test_x, train_y, test_y, Class_dict = load_data(CSV_FILE_PATH)

    # 4-(5-6)-3 fully connected net: 4 input features, two small hidden
    # layers, softmax over the 3 one-hot label columns — which matches the
    # categorical cross-entropy loss below.
    init = K.initializers.glorot_uniform(seed=1)
    simple_adam = K.optimizers.Adam()
    model = K.models.Sequential()
    model.add(K.layers.Dense(units=5, input_dim=4, kernel_initializer=init, activation='relu'))
    model.add(K.layers.Dense(units=6, kernel_initializer=init, activation='relu'))
    model.add(K.layers.Dense(units=3, kernel_initializer=init, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=simple_adam, metrics=['accuracy'])

    b_size = 1
    max_epochs = 100
    print("Starting training ")
    # Streams per-epoch metrics to a monitoring server on port 9000.
    # NOTE(review): assumes something is listening there; Keras is expected
    # to warn rather than abort if the server is absent — confirm.
    remote = callbacks.RemoteMonitor(root='http://localhost:9000')
    h = model.fit(train_x, train_y, validation_data=(test_x, test_y),
                  batch_size=b_size, epochs=max_epochs, shuffle=True,
                  verbose=1, callbacks=[remote])
    print("Training finished \n")

    # Renamed from 'eval' — the original shadowed the builtin of that name.
    eval_results = model.evaluate(test_x, test_y, verbose=0)
    print("Evaluation on test data: loss = %0.6f accuracy = %0.2f%% \n"
          % (eval_results[0], eval_results[1] * 100))

    np.set_printoptions(precision=4)
    unknown = np.array([[6.1, 3.1, 5.1, 1.1]], dtype=np.float32)
    predicted = model.predict(unknown)
    print("Using model to predict species for features: ")
    print(unknown)
    print("\nPredicted softmax vector is: ")
    print(predicted)
    # Invert the label->code mapping so the argmax index maps back to the
    # original species name.
    species_dict = {v: k for k, v in Class_dict.items()}
    print("\nPredicted species is: ")
    print(species_dict[np.argmax(predicted)])