diff --git a/llvm/projects/keras/frontend/approxhpvm_translator.py b/llvm/projects/keras/frontend/approxhpvm_translator.py
index 21e109d9ed896a2f9e6e73433d227da20f84895b..77889280bf8cf3cefcd604a820aef9ff78d5ba73 100644
--- a/llvm/projects/keras/frontend/approxhpvm_translator.py
+++ b/llvm/projects/keras/frontend/approxhpvm_translator.py
@@ -404,7 +404,7 @@ class TensorRtTranslator:
 
-    elif layer_type == "Dense":
+    if layer_type == "Dense":
       input_var_name = self.getSingleInputName(cur_node)
 
       weights = cur_node.weights
 
@@ -416,7 +416,7 @@ class TensorRtTranslator:
       self.program_str += inst_str
 
 
-    elif self.hasBiasAdd(cur_node):
+    if self.hasBiasAdd(cur_node):
       out_var_name2 = self.getVariableName(cur_node)
 
       inst_str = "void* " + out_var_name2 + " = "
@@ -430,14 +430,14 @@ class TensorRtTranslator:
       out_var_name1 = out_var_name2
 
 
-    elif layer_type == "Activation":
+    if layer_type == "Activation":
       input_var_name = self.getSingleInputName(cur_node)
 
       inst_str = genActivationCallStr(input_var_name, out_var_name1, cur_node.activation_type)
       self.program_str += inst_str
 
-    elif self.hasActivation(cur_node) and layer_type != "Activation":
+    if self.hasActivation(cur_node) and layer_type != "Activation":
       activation_type = cur_node.activation_type
       out_var_name3 = self.getVariableName(cur_node)
 
@@ -449,7 +449,7 @@ class TensorRtTranslator:
         sys.exit(0)
 
 
-    elif layer_type == "BatchNormalization":
+    if layer_type == "BatchNormalization":
       input_var_name = self.getSingleInputName(cur_node)
 
       inst_str = "void* " + out_var_name1 + " = "
@@ -464,7 +464,7 @@ class TensorRtTranslator:
       self.program_str += inst_str
 
-    elif layer_type == "Add":
+    if layer_type == "Add":
       input_vars = self.getMultipleInputNames(cur_node)
 
       inst_str = "void* " + out_var_name1 + " = "
 
@@ -472,7 +472,7 @@ class TensorRtTranslator:
       self.program_str += inst_str
 
 
-    elif layer_type == "MaxPooling2D" or layer_type == "AveragePooling2D":
+    if layer_type == "MaxPooling2D" or layer_type == "AveragePooling2D":
       input_var_name = self.getSingleInputName(cur_node)
 
       pool_size = cur_node.pool_size
@@ -491,8 +491,6 @@ class TensorRtTranslator:
       inst_str += "," + str(padding) + "," + str(padding) + "," + str(strides[0]) + "," + str(strides[1])
       inst_str += "); \n"
       self.program_str += inst_str
-    #else:
-    #  print ("ERROR: Operator = ", layer_type, " is NOT currently supported")
 
 
@@ -771,7 +769,8 @@ class TensorRtTranslator:
     file_path_str += "labels.bin\"); \n"
     self.weight_str += file_path_str
 
-    self.input_str += "uint8_t* labels = readLabels("
+    #self.input_str += "uint8_t* labels = readLabels("
+    self.input_str += "uint32_t* labels = readLabels2("
     self.input_str += file_path + ".c_str()," + str(test_labels.shape[0]) + "); \n"
 
@@ -805,11 +804,13 @@ class TensorRtTranslator:
 
   def endBatchLoop(self):
     end_loop_str = ""
-    end_loop_str += "\nuint8_t* labels = readLabelsBatch(labels_path.c_str(),start,end); \n"
+    #end_loop_str += "\nuint8_t* labels = readLabelsBatch(labels_path.c_str(),start,end); \n"
+    end_loop_str += "\nuint32_t* labels = readLabelsBatch2(labels_path.c_str(),start,end); \n"
 
     last_node = self.dfg.last_node
     output_var = self.output_map[last_node.layer_name]
-    accuracy_call = "\nfloat accuracy = computeAccuracy2(labels, batch_size, " + output_var + "); \n"
+    #accuracy_call = "\nfloat accuracy = computeAccuracy2(labels, batch_size, " + output_var + "); \n"
+    accuracy_call = "\nfloat accuracy = computeAccuracy3(labels, batch_size, " + output_var + "); \n"
     end_loop_str += accuracy_call
 
     #end_loop_str += "float accuracy = computeAccuracy2(labels, batch_size, var_60); "
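Note on the hunks above: turning the `elif` chain into independent `if` checks is the behavioral core of this file's change. Bias-add and activation emission stop being alternatives to the layer-type branch and become follow-on checks, so a single node can emit several runtime calls. A minimal sketch of that control-flow difference, assuming (as the surrounding hunks suggest) that a Conv2D branch precedes these ones; the `Node` stand-in and `emitted` list are illustrative, not the translator's real API:

    class Node:
        """Illustrative stand-in for a DFG node, not the frontend's class."""
        layer_type = "Conv2D"
        use_bias = True            # node carries a bias term
        activation_type = "relu"   # fused activation

    node = Node()

    # Old shape: one if/elif chain. The Conv2D branch matches first, so the
    # bias-add and activation branches below it can never fire for this node.
    emitted = []
    if node.layer_type == "Conv2D":
        emitted.append("convolution")
    elif node.use_bias:
        emitted.append("bias_add")
    elif node.activation_type != "linear":
        emitted.append("activation")
    assert emitted == ["convolution"]

    # New shape: independent `if` checks. The same node now emits every
    # applicable runtime call, in order.
    emitted = []
    if node.layer_type == "Conv2D":
        emitted.append("convolution")
    if node.use_bias:
        emitted.append("bias_add")
    if node.activation_type != "linear":
        emitted.append("activation")
    assert emitted == ["convolution", "bias_add", "activation"]
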
diff --git a/llvm/projects/keras/frontend/hpvm_dfg_translator.py b/llvm/projects/keras/frontend/hpvm_dfg_translator.py
index 066d5d23b90fae0048036b69bcdd7556adec9c92..7467f316e98e36f6edc8924def93a216712d7ab3 100644
--- a/llvm/projects/keras/frontend/hpvm_dfg_translator.py
+++ b/llvm/projects/keras/frontend/hpvm_dfg_translator.py
@@ -637,7 +637,8 @@ class HPVMTranslator:
     main_func_str += "hpvm_request_tensor(result, 0); \n\n"
 
     main_func_str += "__visc__cleanup(); \n "
-    main_func_str += "computeAccuracy2(labels, " + str(len(test_data)) + ", result); \n"
+    #main_func_str += "computeAccuracy2(labels, " + str(len(test_data)) + ", result); \n"
+    main_func_str += "computeAccuracy3(labels, " + str(len(test_data)) + ", result); \n"
     main_func_str += "return 0; \n\n"
     main_func_str += "} \n"
diff --git a/llvm/projects/keras/frontend/promise_translator.py b/llvm/projects/keras/frontend/promise_translator.py
index ed8f6edb7cade3a2a9e6b813c65873fc4a03d974..f33770d5ad1bcc3166d8cee5586847790efedcd4 100644
--- a/llvm/projects/keras/frontend/promise_translator.py
+++ b/llvm/projects/keras/frontend/promise_translator.py
@@ -1131,11 +1131,14 @@ class PromiseRtTranslator:
   def endBatchLoop(self):
     end_loop_str = ""
-    end_loop_str += "\nuint8_t* labels = readLabelsBatch(labels_path.c_str(),start,end); \n"
+    #end_loop_str += "\nuint8_t* labels = readLabelsBatch(labels_path.c_str(),start,end); \n"
+    end_loop_str += "\nuint32_t* labels = readLabelsBatch2(labels_path.c_str(),start,end); \n"
+
     last_node = self.dfg.last_node
     output_var = self.output_map[last_node.layer_name]
-    accuracy_call = "\nfloat accuracy = computeAccuracy2(labels, batch_size, " + output_var + "); \n"
+    #accuracy_call = "\nfloat accuracy = computeAccuracy2(labels, batch_size, " + output_var + "); \n"
+    accuracy_call = "\nfloat accuracy = computeAccuracy3(labels, batch_size, " + output_var + "); \n"
     end_loop_str += accuracy_call
 
     #end_loop_str += "float accuracy = computeAccuracy2(labels, batch_size, var_60); "
diff --git a/llvm/projects/keras/frontend/utils.py b/llvm/projects/keras/frontend/utils.py
index c4bbe1cde84732f36fcac46e7f5094223bcbcef0..ffc338c19ea60df7c53430ac38b613c2daef402e 100644
--- a/llvm/projects/keras/frontend/utils.py
+++ b/llvm/projects/keras/frontend/utils.py
@@ -5,16 +5,27 @@
 def nodeHasBias(cur_node):
 
   if cur_node.layer_type == "Conv2D" or cur_node.layer_type == "DepthwiseConv2D" or cur_node.layer_type == "Dense":
+    #return True
+    return cur_node.use_bias
+  else:
+    return False
+
+
+def layerHasActivationAttr(cur_node):
+
+  if cur_node.layer_type == "Conv2D" or cur_node.layer_type == "DepthwiseConv2D" \
+     or cur_node.layer_type == "Dense" or cur_node.layer_type == "Activation":
     return True
   else:
     return False
-  
+
 def nodeHasActivation(cur_node):
 
   if cur_node.layer_type == "Conv2D" or cur_node.layer_type == "DepthwiseConv2D" \
      or cur_node.layer_type == "Dense" or cur_node.layer_type == "Activation":
-    return True
+    #return True
+    return cur_node.activation_type != "linear"
   else:
     return False
diff --git a/llvm/projects/keras/frontend/weight_utils.py b/llvm/projects/keras/frontend/weight_utils.py
index 78504bbf03d727211957f6b3d64db749c7093174..db81fa2d70664daef92e98e7230d842359912723 100644
--- a/llvm/projects/keras/frontend/weight_utils.py
+++ b/llvm/projects/keras/frontend/weight_utils.py
@@ -12,9 +12,11 @@ def dumpLabels(file_name, Y_test):
 
   for label in Y_test:
     label_val = 0
     if len(Y_test.shape) > 1:
-      label_val = np.int8(label[0])
+      #label_val = np.int8(label[0])
+      label_val = np.int32(label[0])
     else:
-      label_val = np.int8(label)
+      #label_val = np.int8(label)
+      label_val = np.int32(label)
 
     if label_val not in labels_map:
       labels_map[label_val] = 0
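
Across all four translators, the generated harness switches from `uint8_t` labels (`readLabels`, `readLabelsBatch`, `computeAccuracy2`) to `uint32_t` labels (`readLabels2`, `readLabelsBatch2`, `computeAccuracy3`), and `dumpLabels` widens what it writes to `np.int32` to match. The patch doesn't state the motivation, but the likely one is range: an 8-bit label wraps silently once class ids leave the 8-bit range. A minimal sketch of the failure mode the widening avoids (the label value is hypothetical):

    import numpy as np

    # Hypothetical class id from a dataset with many classes
    # (e.g., an ImageNet-style 1000-class label).
    label = np.int64(300)

    print(label.astype(np.int8))    # 44  -- wraps modulo 256: wrong label on disk
    print(label.astype(np.int32))   # 300 -- preserved; matches the uint32_t readers

Because the on-disk label format and the runtime readers/accuracy helpers must agree, the dump side and all three reader/accuracy call sites have to move together, which is why every translator is touched in this one patch.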