From 0b688f371da5420a51acac5a763dc3854956530f Mon Sep 17 00:00:00 2001
From: Hashim Sharif <hsharif3@miranda.cs.illinois.edu>
Date: Sun, 31 Jan 2021 19:22:01 -0600
Subject: [PATCH] Starting on Json file generation

---
 .../keras/frontend/approxhpvm_translator.py   | 72 ++++++++++++++++---
 hpvm/projects/keras/frontend/knobs.py         | 38 ++++++++++
 2 files changed, 99 insertions(+), 11 deletions(-)
 create mode 100644 hpvm/projects/keras/frontend/knobs.py

diff --git a/hpvm/projects/keras/frontend/approxhpvm_translator.py b/hpvm/projects/keras/frontend/approxhpvm_translator.py
index 7785e8301f..fd750175c5 100644
--- a/hpvm/projects/keras/frontend/approxhpvm_translator.py
+++ b/hpvm/projects/keras/frontend/approxhpvm_translator.py
@@ -5,6 +5,7 @@ from frontend.promise_translator import PromiseRtTranslator
 from frontend.hpvm_dfg_translator import HPVMTranslator
 from frontend.weight_utils import dumpLabels, dumpData, dumpConvWeights, dumpFcWeights, dumpFcBias
 from frontend.utils import *
+from frontend.knobs import *
 
 import keras
 import os
@@ -203,11 +204,15 @@ class TensorRtTranslator:
     self.weight_str = ""
     self.program_str = ""
     self.input_str = ""
-    self.json_str = ""    # Used for Json gen
-    self.cur_height = 32  # Used for Json gen
-    self.cur_width = 32   # Used for Json egen
-    self.op_count = 1     # Used for Json gen
     self.filter_names = {}
+
+    # Used for Json gen
+    self.json_str = ""
+    self.knobs_str = ""
+    self.cur_height = 32
+    self.cur_width = 32
+    self.op_count = 1
+

 
@@ -215,7 +220,7 @@ class TensorRtTranslator:
     self.cur_height = data.shape[2]
     self.cur_width = data.shape[3]
 
-    DEBUG ("cur_height = ", self.cur_height, " cur_width = ", self.cur_width, "\n")
+    DEBUG ("cur_height = ", self.cur_height, " cur_width = ", self.cur_width, ", \n")
 
 
   def addConvOverheads(self, weights, padding, strides):
@@ -228,7 +233,8 @@ class TensorRtTranslator:
     flops = H_d * W_d * K_d
     DEBUG ("conv_flops = ", flops)
 
-    self.json_str += "op" + str(self.op_count) + " : " + str(flops) + "\n"
+    self.json_str += "convolution_" + str(self.op_count) + " : " + str(flops) + ", \n"
+    self.knobs_str += "convolution_" + str(self.op_count) + " : [" + conv_knobs + "], \n"
     self.op_count += 1
 
     self.cur_height = self.cur_height / strides[0]
@@ -242,7 +248,8 @@ class TensorRtTranslator:
     flops = weights.shape[0] * weights.shape[1]
     DEBUG ("dense_flops = ", flops)
 
-    self.json_str += "op" + str(self.op_count) + " : " + str(flops) + "\n"
+    self.json_str += "linear_" + str(self.op_count) + " : " + str(flops) + "\n"
+    self.knobs_str += "linear_" + str(self.op_count) + " : [" + baseline_knobs + "], \n"
     self.op_count += 1
 
     self.cur_height = 1
@@ -258,6 +265,14 @@ class TensorRtTranslator:
 
     DEBUG ("cur_height = ", self.cur_height, " cur_width = ", self.cur_width, "\n")
 
+
+  def addBaselineKnob(self, op_name):
+
+    self.json_str += op_name + "_" + str(self.op_count) + " : 0, \n"
+    self.knobs_str += op_name + "_" + str(self.op_count) + " : [" + baseline_knobs + "], \n"
+    self.op_count += 1
+
+
 
   def getWeightStr(self):
@@ -451,7 +466,12 @@ class TensorRtTranslator:
 
     if layer_type == "Conv2D":
       self.addConvOverheads(weights, padding, strides)
-
+    elif layer_type == "DepthwiseConv2D":
+      #self.json_str += "depthwise_convolution_" + str(self.op_count) + " : 0, \n"
+      #self.op_count += 1
+      self.addBaselineKnob("depthwise_convolution")
+
+
     if layer_type == "Dense":
       input_var_name = self.getSingleInputName(cur_node)
@@ -481,13 +501,21 @@ class TensorRtTranslator:
       # NOTE: Changing output variable
       out_var_name1 = out_var_name2
 
+      #self.json_str += "add_" + str(self.op_count) + " : 0, \n"
+      # self.op_count += 1
+      self.addBaselineKnob("add")
+
 
     if layer_type == "Activation":
       input_var_name = self.getSingleInputName(cur_node)
 
       inst_str = genActivationCallStr(input_var_name, out_var_name1, cur_node.activation_type)
       self.program_str += inst_str
-
+
+      #self.json_str += cur_node.activation_type + "_" + str(self.op_count) + " : 0, \n"
+      #self.op_count += 1
+      self.addBaselineKnob(cur_node.activation_type)
+
     if self.hasActivation(cur_node) and layer_type != "Activation":
       activation_type = cur_node.activation_type
@@ -499,7 +527,11 @@ class TensorRtTranslator:
       if activation_type == "softmax":
         print ("Softmax canNOT be part of Dense/Conv Op. Insert: Activation('softmax');")
         sys.exit(0)
-
+
+      #self.json_str += activation_type + "_" + str(self.op_count) + " : 0, \n"
+      #self.op_count += 1
+      self.addBaselineKnob(activation_type)
+
     if layer_type == "BatchNormalization":
       input_var_name = self.getSingleInputName(cur_node)
@@ -514,6 +546,11 @@ class TensorRtTranslator:
       inst_str += "); \n"
       self.program_str += inst_str
 
+
+      #self.json_str += "batchnorm_" + str(self.op_count) + " : 0, \n"
+      #self.op_count += 1
+      self.addBaselineKnob("batchnorm")
+
 
     if layer_type == "Add":
@@ -523,6 +560,10 @@ class TensorRtTranslator:
       inst_str += "tensorAdd(" + input_vars[0] + ", " + input_vars[1] + "); \n"
       self.program_str += inst_str
 
+      #self.json_str += "add_" + str(self.op_count) + " : 0, \n"
+      #self.op_count += 1
+      self.addBaselineKnob("add")
+
     if layer_type == "MaxPooling2D" or layer_type == "AveragePooling2D":
       input_var_name = self.getSingleInputName(cur_node)
@@ -534,8 +575,16 @@ class TensorRtTranslator:
       pool_type = 0
       if layer_type == "MaxPooling2D":
         pool_type = "0"
+        #self.json_str += "maxpool_" + str(self.op_count) + " : 0, \n"
+        #self.op_count += 1
+        self.addBaselineKnob("maxpool")
+
       if layer_type == "AveragePooling2D":
-        pool_type = "1"
+        pool_type = "1"
+        #self.json_str += "avgpool_" + str(self.op_count) + " : 0, \n"
+        #self.op_count += 1
+        self.addBaselineKnob("avgpool")
+
 
       # tensorPooling(input, pool_type, pool_h, pool_w, v_pad, h_pad, v_stride, h_stride)
       inst_str = "void* " + out_var_name1 + " = "
@@ -901,6 +950,7 @@ class TensorRtTranslator:
 
     f = open(dir_prefix + "/tuner.json", "w+")
     f.write(self.json_str)
+    f.write(self.knobs_str)
     f.close()
 
 
diff --git a/hpvm/projects/keras/frontend/knobs.py b/hpvm/projects/keras/frontend/knobs.py
new file mode 100644
index 0000000000..291221acb5
--- /dev/null
+++ b/hpvm/projects/keras/frontend/knobs.py
@@ -0,0 +1,38 @@
+
+knobs_speedups = {}
+knobs_speedups[11] = 1
+knobs_speedups[12] = 1.5
+knobs_speedups[151] = 3
+knobs_speedups[152] = 3
+knobs_speedups[153] = 3
+knobs_speedups[154] = 3
+knobs_speedups[155] = 2.25
+knobs_speedups[156] = 2.25
+knobs_speedups[157] = 2.25
+knobs_speedups[158] = 2.25
+knobs_speedups[159] = 2.25
+knobs_speedups[160] = 2.25
+knobs_speedups[161] = 2
+knobs_speedups[162] = 2
+knobs_speedups[163] = 2
+knobs_speedups[164] = 2
+knobs_speedups[165] = 2
+knobs_speedups[166] = 2
+knobs_speedups[167] = 2
+knobs_speedups[168] = 2
+knobs_speedups[261] = 3
+knobs_speedups[262] = 3
+knobs_speedups[263] = 2.25
+knobs_speedups[264] = 2.25
+knobs_speedups[265] = 2.25
+knobs_speedups[266] = 2
+knobs_speedups[267] = 2
+knobs_speedups[268] = 2
+knobs_speedups[269] = 2
+
+
+conv_knobs = "12, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 261, 262, 263, 264, 265, 266, 267, 268, 269"
+
+baseline_knobs = "12"
+
+
-- 
GitLab
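
Note (not part of the patch above): the standalone sketch below mirrors the string-building the patch adds (addConvOverheads, addBaselineKnob, and the final f.write calls) so the shape of the generated tuner.json text is easy to see. The class name, the abbreviated knob lists, and the toy layer sequence and FLOP count are invented for illustration only; they are not taken from the patch.

# Standalone sketch of the json_str/knobs_str generation added in this patch.
# conv_knobs/baseline_knobs stand in for the strings in frontend/knobs.py
# (abbreviated here); the layer sequence and FLOP count are hypothetical.

conv_knobs = "12, 151, 152, 153"
baseline_knobs = "12"

class TunerJsonSketch:
    def __init__(self):
        self.json_str = ""    # op name -> estimated FLOPs (0 for non-tunable ops)
        self.knobs_str = ""   # op name -> list of applicable approximation knobs
        self.op_count = 1     # running operator index shared by both strings

    def addConvOp(self, flops):
        # Mirrors addConvOverheads: convolutions get the full conv knob list.
        self.json_str += "convolution_" + str(self.op_count) + " : " + str(flops) + ", \n"
        self.knobs_str += "convolution_" + str(self.op_count) + " : [" + conv_knobs + "], \n"
        self.op_count += 1

    def addBaselineKnob(self, op_name):
        # Mirrors addBaselineKnob: other ops get only the baseline knob.
        self.json_str += op_name + "_" + str(self.op_count) + " : 0, \n"
        self.knobs_str += op_name + "_" + str(self.op_count) + " : [" + baseline_knobs + "], \n"
        self.op_count += 1

t = TunerJsonSketch()
t.addConvOp(9216)              # hypothetical Conv2D
t.addBaselineKnob("relu")      # hypothetical Activation layer
t.addBaselineKnob("maxpool")   # hypothetical MaxPooling2D layer

# The translator writes json_str followed by knobs_str into tuner.json:
print(t.json_str + t.knobs_str)

For this toy sequence the output is the per-op FLOP lines (convolution_1 : 9216, relu_2 : 0, maxpool_3 : 0) followed by the per-op knob lists, i.e. the json_str-then-knobs_str order that the patched writer emits into tuner.json.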
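The new knobs.py also defines knobs_speedups (a map from knob id to a nominal speedup factor), which nothing in this patch reads yet. Purely as an illustration of how such a table could be combined with the per-op FLOP estimates above, and not as code from the HPVM tuner, a candidate knob assignment could be scored like this:

# Hypothetical cost estimate: NOT part of the patch or the HPVM tuner.
# It only shows how a knob-id -> speedup table like knobs_speedups could be
# combined with per-op FLOP estimates of the kind emitted into tuner.json.

knobs_speedups = {11: 1, 12: 1.5, 151: 3, 161: 2}   # abbreviated subset

# Per-op FLOP estimates, in the spirit of the json_str entries (values made up).
op_flops = {"convolution_1": 9216, "convolution_2": 4608}

# A candidate configuration assigns one knob id to each op.
config = {"convolution_1": 151, "convolution_2": 12}

def estimated_cost(op_flops, config):
    # Scale each op's FLOPs down by the speedup of its chosen knob.
    return sum(flops / knobs_speedups[config[op]] for op, flops in op_flops.items())

print(estimated_cost(op_flops, config))   # 9216/3 + 4608/1.5 = 6144.0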