From f28b8bd7c4a42b651db328aaba3e9b38c82b662f Mon Sep 17 00:00:00 2001
From: Hashim Sharif <hsharif3@tyler.cs.illinois.edu>
Date: Mon, 27 Jul 2020 17:22:00 -0500
Subject: [PATCH] Moving AlexNet2 to new Benchmark Structure
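
Port alexnet2.py to the shared Benchmark base class: model construction,
training, and data preprocessing now live in AlexNet2.buildModel,
AlexNet2.trainModel, and AlexNet2.data_preprocess, while the ad-hoc binary
weight/label dumping helpers are dropped in favor of the common frontend
infrastructure. Also fix dumpHPVMToKerasModel to save to the caller-supplied
output_model path instead of the hardcoded "alexnet.h5".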

---
 llvm/projects/keras/frontend/weight_utils.py |   2 +-
 llvm/projects/keras/src/alexnet2.py          | 298 +++++++------------
 2 files changed, 109 insertions(+), 191 deletions(-)

diff --git a/llvm/projects/keras/frontend/weight_utils.py b/llvm/projects/keras/frontend/weight_utils.py
index 9d5bf75d6d..644a4842d3 100644
--- a/llvm/projects/keras/frontend/weight_utils.py
+++ b/llvm/projects/keras/frontend/weight_utils.py
@@ -196,6 +196,6 @@ def dumpHPVMToKerasModel(model, reload_dir, output_model, X_test, Y_test):
                 optimizer=Adam(lr=0.0001, decay=1e-6),
                 metrics=['accuracy'])    
 
-  model.save("alexnet.h5")
+  model.save(output_model)
 
   return model
diff --git a/llvm/projects/keras/src/alexnet2.py b/llvm/projects/keras/src/alexnet2.py
index 812b212165..e29917b26f 100644
--- a/llvm/projects/keras/src/alexnet2.py
+++ b/llvm/projects/keras/src/alexnet2.py
@@ -1,4 +1,5 @@
 
+import sys
 import keras
 from keras.models import Sequential
 from keras.utils import np_utils
@@ -11,233 +12,150 @@ from keras.callbacks import LearningRateScheduler
 import numpy as np
 import os
 import struct
+from Benchmark import Benchmark
 from keras import backend as K
-from approxhpvm_translator import translate_to_approxhpvm
+from frontend.approxhpvm_translator import translate_to_approxhpvm
 
 
 
-def dumpWeights(file_name, weights, N, H, W, C):
-    # NOTE: Writing the NHWC weights array as NCHW
-    f = open(file_name, "wb")
-    for i in range(N):
-        for j in range(C):
-            for k in range(H):
-                for l in range(W):
-                    f.write(weights[i][k][l][j])
+class AlexNet2(Benchmark):
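+  """AlexNet2 on CIFAR-10, expressed as a subclass of the shared Benchmark class."""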
 
-    f.close()
 
-    
-def dumpConvWeights(file_name, weights, N, C, H, W):
 
-    print (weights.shape)
-    
-    f = open(file_name, "wb")
-    for i in range(N):
-        for j in range(C):
-            for k in range(H):
-                for l in range(W):
-                    f.write(weights[k][l][j][i])
-    f.close()
+  def lr_schedule2(self, epoch):
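+    """Stepped learning-rate schedule: decays from 5e-4 down to 3e-5 as training progresses."""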
+    lrate = 0.0005
+    if epoch > 100:
+      lrate = 0.0003
+    if epoch > 200:
+      lrate = 0.0002
+    if epoch > 250:
+      lrate = 0.0001
+    if epoch > 300:
+      lrate = 0.00003
 
+    return lrate
 
     
-def dumpFcWeights(file_name, weights, H, W):
 
-    print (weights.shape)
+  def buildModel(self):
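+      """Build the six-convolution CIFAR-10 model: tanh activations, L2 weight decay, NCHW input."""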
 
-    f = open(file_name, "wb")
-    for i in range(H):
-        for j in range(W):
-            f.write(weights[i][j])
-    f.close()        
+      weight_decay = 1e-4
+      activation_type = 'tanh'
 
+      model = Sequential()
+      model.add(Conv2D(32, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay), input_shape=(3, 32, 32)))
+      model.add(Activation(activation_type))
+      model.add(Conv2D(32, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
+      model.add(Activation(activation_type))
+      model.add(MaxPooling2D(pool_size=(2,2)))
+      model.add(Dropout(0.2))
 
-def dumpFcBias(file_name, bias, W):
+      model.add(Conv2D(64, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
+      model.add(Activation(activation_type))
+      model.add(Conv2D(64, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
+      model.add(Activation(activation_type))
+      model.add(MaxPooling2D(pool_size=(2,2)))
+      model.add(Dropout(0.3))
 
-    print (bias.shape)
+      model.add(Conv2D(128, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
+      model.add(Activation(activation_type))
+      model.add(Conv2D(128, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
+      model.add(Activation(activation_type))
+      model.add(MaxPooling2D(pool_size=(2,2)))
+      model.add(Dropout(0.4))
 
-    f = open(file_name, "wb")
-    for i in range(W):
-        f.write(bias[i])
-    f.close()
+      model.add(Flatten())
+      model.add(Dense(self.num_classes))
+      model.add(Activation('softmax'))
+      model.summary()
 
+      return model
 
-def dumpLabels(file_name, Y_test):
 
-    f = open(file_name, "wb")
-    
-    labels_map = {}    
-    for label in Y_test:
-        label_val = np.int8(label[0])
-        if label_val not in labels_map:
-            labels_map[label_val] = 0
-        labels_map[label_val] += 1
 
-        f.write(label_val)
+  def trainModel(self, model):
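+        """Train on CIFAR-10 with z-score normalization, data augmentation, and RMSprop."""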
 
-    f.close()
-    
+        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
 
-    
-def dumpData(X_test, file_name, N, C, H, W):
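+        # Keep the raw integer labels before y_test is one-hot encoded below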
+        test_labels = y_test
+        x_train = x_train.astype('float32')
+        x_test = x_test.astype('float32')
 
-    print (X_test.shape)
-    
-    f = open(file_name, "wb")
-    for i in range(N):
-        for j in range(C):
-            for k in range(H):
-                for l in range(W):
-                    val = struct.unpack("f", struct.pack("f", X_test[i][j][k][l]))
-                    f.write(np.float32(val[0]))
+        #z-score
+        mean = np.mean(x_train,axis=(0,1,2,3))
+        std = np.std(x_train,axis=(0,1,2,3))
+        x_train = (x_train-mean)/(std+1e-7)
+        x_test = (x_test-mean)/(std+1e-7)
 
-    f.close()
+        y_train = np_utils.to_categorical(y_train, self.num_classes)
+        y_test = np_utils.to_categorical(y_test, self.num_classes)
 
+        #data augmentation
+        datagen = ImageDataGenerator(
+          rotation_range=15,
+          width_shift_range=0.1,
+          height_shift_range=0.1,
+          horizontal_flip=True,
+          )
 
+        datagen.fit(x_train)
 
-    
+        #training
+        batch_size = 64
+        opt_rms = keras.optimizers.rmsprop(lr=0.001,decay=1e-6)
+
+        model.compile(loss='categorical_crossentropy', optimizer=opt_rms, metrics=['accuracy'])
 
-def lr_schedule(epoch):
-  lrate = 0.001
-  if epoch > 75:
-    lrate = 0.0005
-  if epoch > 100:
-    lrate = 0.0003
-  return lrate
+        model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
+                            steps_per_epoch=x_train.shape[0] // batch_size,
+                            epochs=3,  # full training uses epochs=350
+                            verbose=1, validation_data=(x_test,y_test),
+                            callbacks=[LearningRateScheduler(self.lr_schedule2)])
 
+        return model
 
-def lr_schedule2(epoch):
-  lrate = 0.0005
-  if epoch > 100:
-    lrate = 0.0003
-  if epoch > 200:
-    lrate = 0.0002
-  if epoch > 250:
-    lrate = 0.0001
-  if epoch > 300:
-    lrate = 0.00003
 
-  return lrate
 
+    
+  def data_preprocess(self):
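+      """Load CIFAR-10 and apply the same z-score normalization used during training."""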
 
-K.set_image_data_format('channels_first')
+      (x_train, y_train), (x_test, y_test) = cifar10.load_data()
+      
+      x_train = x_train.astype('float32')
+      x_test = x_test.astype('float32')
 
-(x_train, y_train), (x_test, y_test) = cifar10.load_data()
-test_labels = y_test
-x_train = x_train.astype('float32')
-x_test = x_test.astype('float32')
+      #z-score
+      mean = np.mean(x_train,axis=(0,1,2,3))
+      std = np.std(x_train,axis=(0,1,2,3))
+      x_train = (x_train-mean)/(std+1e-7)
+      x_test = (x_test-mean)/(std+1e-7)
 
-#z-score
-mean = np.mean(x_train,axis=(0,1,2,3))
-std = np.std(x_train,axis=(0,1,2,3))
-x_train = (x_train-mean)/(std+1e-7)
-x_test = (x_test-mean)/(std+1e-7)
-
-
-# Dumping test data and test labels
-dir_prefix = "/home/hsharif3/Gitlab/hpvm/llvm/projects/hpvm-tensor-rt/model_params/alexnet2_cifar10/"
-
-dumpLabels(dir_prefix + "test_labels.bin", y_test)
-dumpData(x_test, dir_prefix + "norm_cifar_input.bin", 10000, 3, 32, 32)
-
-
-
-num_classes = 10
-y_train = np_utils.to_categorical(y_train,num_classes)
-y_test = np_utils.to_categorical(y_test,num_classes)
-
-weight_decay = 1e-4
-activation_type = 'tanh'
-
-
-os.environ["CUDA_VISIBLE_DEVICES"] = "0"
-
-
-model = Sequential()
-model.add(Conv2D(32, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay), input_shape=x_train.shape[1:]))
-model.add(Activation(activation_type))
-#model.add(BatchNormalization())
-model.add(Conv2D(32, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
-model.add(Activation(activation_type))
-#model.add(BatchNormalization())
-model.add(MaxPooling2D(pool_size=(2,2)))
-model.add(Dropout(0.2))
-
-model.add(Conv2D(64, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
-model.add(Activation(activation_type))
-#model.add(BatchNormalization())
-model.add(Conv2D(64, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
-model.add(Activation(activation_type))
-#model.add(BatchNormalization())
-model.add(MaxPooling2D(pool_size=(2,2)))
-model.add(Dropout(0.3))
-
-model.add(Conv2D(128, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
-model.add(Activation(activation_type))
-#model.add(BatchNormalization())
-model.add(Conv2D(128, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
-model.add(Activation(activation_type))
-#model.add(BatchNormalization())
-model.add(MaxPooling2D(pool_size=(2,2)))
-model.add(Dropout(0.4))
-
-model.add(Flatten())
-model.add(Dense(num_classes))
-model.add(Activation('softmax'))
-model.summary()
-
-#data augmentation
-datagen = ImageDataGenerator(
-        rotation_range=15,
-        width_shift_range=0.1,
-        height_shift_range=0.1,
-        horizontal_flip=True,
-        )
-
-datagen.fit(x_train)
-
-
-#training
-batch_size = 64
-
-opt_rms = keras.optimizers.rmsprop(lr=0.001,decay=1e-6)
-model.compile(loss='categorical_crossentropy', optimizer=opt_rms, metrics=['accuracy'])
-model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),\
-                    steps_per_epoch=x_train.shape[0] // batch_size, #epochs=350,\
-                    epochs=1,
-                    verbose=1,validation_data=(x_test,y_test),callbacks=[LearningRateScheduler(lr_schedule2)])
-#save to disk
-model_json = model.to_json()
-with open('model.json', 'w') as json_file:
-  json_file.write(model_json)
-  model.save_weights('model.h5') 
+      return x_train, y_train, x_test, y_test
 
-#testing
-scores = model.evaluate(x_test, y_test, batch_size=128, verbose=1)
-print('\nTest result: %.3f loss: %.3f' % (scores[1]*100,scores[0]))
+  
 
 
-translate_to_approxhpvm(model, "alexnet2_cifar10_test/", x_test, test_labels, "alexnet2_cifar10/", y_test)
-sys.exit(0)
+if __name__ == "__main__":
 
+
+    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
+    # Changing to NCHW format
+    K.set_image_data_format('channels_first')
 
-
-params = model.get_weights()
-dumpConvWeights(dir_prefix + "conv1.bin", params[0], 32, 3, 3, 3)
-dumpFcBias(dir_prefix + "conv1_bias.bin", params[1], 32)
-dumpConvWeights(dir_prefix + "conv2.bin", params[2], 32, 32, 3, 3)
-dumpFcBias(dir_prefix + "conv2_bias.bin", params[3], 32)
-dumpConvWeights(dir_prefix + "conv3.bin", params[4], 64, 32, 3, 3)
-dumpFcBias(dir_prefix + "conv3_bias.bin", params[5], 64)
-dumpConvWeights(dir_prefix +  "conv4.bin", params[6], 64, 64, 3, 3)
-dumpFcBias(dir_prefix + "conv4_bias.bin", params[7], 64)
-dumpConvWeights(dir_prefix +  "conv5.bin", params[8], 128, 64, 3, 3)
-dumpFcBias(dir_prefix + "conv5_bias.bin", params[9], 128)
-dumpConvWeights(dir_prefix + "conv6.bin", params[10], 128, 128, 3, 3)
-dumpFcBias(dir_prefix + "conv6_bias.bin", params[11], 128)
-
-dumpFcWeights(dir_prefix +  "fc1.bin", params[12], 2048, 10)
-dumpFcBias(dir_prefix +  "fc1_bias.bin", params[13], 10)
-  
-
+
+    ### Parameters specific to this benchmark
+    reload_dir = "/home/hsharif3/Gitlab/hpvm/llvm/projects/hpvm-tensor-rt/model_params/alexnet2_cifar10/"
+    keras_model_file = "alexnet2.h5"
+    hpvm_dir = "data/alexnet2_cifar10/"
+    num_classes = 10
+
+    alexnet2 = AlexNet2("AlexNet2", reload_dir, keras_model_file, hpvm_dir, num_classes)
+    
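+    # Forward command-line arguments to the shared Benchmark driver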
+    alexnet2.run(sys.argv)
-- 
GitLab