Commit b7fcf0f3 authored by Hashim Sharif

Moving to int32 labels - previously int8

parent 392651c5
@@ -404,7 +404,7 @@ class TensorRtTranslator:
-    elif layer_type == "Dense":
+    if layer_type == "Dense":
       input_var_name = self.getSingleInputName(cur_node)
       weights = cur_node.weights
@@ -416,7 +416,7 @@ class TensorRtTranslator:
       self.program_str += inst_str
-    elif self.hasBiasAdd(cur_node):
+    if self.hasBiasAdd(cur_node):
       out_var_name2 = self.getVariableName(cur_node)
       inst_str = "void* " + out_var_name2 + " = "
@@ -430,14 +430,14 @@ class TensorRtTranslator:
       out_var_name1 = out_var_name2
-    elif layer_type == "Activation":
+    if layer_type == "Activation":
       input_var_name = self.getSingleInputName(cur_node)
       inst_str = genActivationCallStr(input_var_name, out_var_name1, cur_node.activation_type)
       self.program_str += inst_str
-    elif self.hasActivation(cur_node) and layer_type != "Activation":
+    if self.hasActivation(cur_node) and layer_type != "Activation":
       activation_type = cur_node.activation_type
       out_var_name3 = self.getVariableName(cur_node)
@@ -449,7 +449,7 @@ class TensorRtTranslator:
       sys.exit(0)
-    elif layer_type == "BatchNormalization":
+    if layer_type == "BatchNormalization":
       input_var_name = self.getSingleInputName(cur_node)
       inst_str = "void* " + out_var_name1 + " = "
@@ -464,7 +464,7 @@ class TensorRtTranslator:
       self.program_str += inst_str
-    elif layer_type == "Add":
+    if layer_type == "Add":
       input_vars = self.getMultipleInputNames(cur_node)
       inst_str = "void* " + out_var_name1 + " = "
@@ -472,7 +472,7 @@ class TensorRtTranslator:
       self.program_str += inst_str
-    elif layer_type == "MaxPooling2D" or layer_type == "AveragePooling2D":
+    if layer_type == "MaxPooling2D" or layer_type == "AveragePooling2D":
       input_var_name = self.getSingleInputName(cur_node)
       pool_size = cur_node.pool_size
@@ -491,8 +491,6 @@ class TensorRtTranslator:
       inst_str += "," + str(padding) + "," + str(padding) + "," + str(strides[0]) + "," + str(strides[1])
       inst_str += "); \n"
       self.program_str += inst_str
-    #else:
-    #  print ("ERROR: Operator = ", layer_type, " is NOT currently supported")
@@ -771,7 +769,8 @@ class TensorRtTranslator:
     file_path_str += "labels.bin\"); \n"
     self.weight_str += file_path_str
-    self.input_str += "uint8_t* labels = readLabels("
+    #self.input_str += "uint8_t* labels = readLabels("
+    self.input_str += "uint32_t* labels = readLabels2("
     self.input_str += file_path + ".c_str()," + str(test_labels.shape[0]) + "); \n"
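The width change is not cosmetic: an 8-bit label can only hold class ids 0-255 (0-127 if signed), so larger label spaces wrap around silently and accuracy gets computed against corrupted labels. A quick numpy illustration of the overflow that 32-bit labels avoid:

import numpy as np

# Class ids that overflow 8 bits wrap around silently on conversion.
labels = np.array([5, 130, 999])
print(labels.astype(np.int8))    # [   5 -126  -25]  signed wraparound
print(labels.astype(np.uint8))   # [  5 130 231]     999 % 256 == 231
print(labels.astype(np.int32))   # [  5 130 999]     intact at 32 bits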
@@ -805,11 +804,13 @@ class TensorRtTranslator:
   def endBatchLoop(self):
     end_loop_str = ""
-    end_loop_str += "\nuint8_t* labels = readLabelsBatch(labels_path.c_str(),start,end); \n"
+    #end_loop_str += "\nuint8_t* labels = readLabelsBatch(labels_path.c_str(),start,end); \n"
+    end_loop_str += "\nuint32_t* labels = readLabelsBatch2(labels_path.c_str(),start,end); \n"
     last_node = self.dfg.last_node
     output_var = self.output_map[last_node.layer_name]
-    accuracy_call = "\nfloat accuracy = computeAccuracy2(labels, batch_size, " + output_var + "); \n"
+    #accuracy_call = "\nfloat accuracy = computeAccuracy2(labels, batch_size, " + output_var + "); \n"
+    accuracy_call = "\nfloat accuracy = computeAccuracy3(labels, batch_size, " + output_var + "); \n"
     end_loop_str += accuracy_call
     #end_loop_str += "float accuracy = computeAccuracy2(labels, batch_size, var_60); "
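Tracing the concatenations in endBatchLoop, the per-batch epilogue the translator now generates looks as follows (a sketch only; "var_60" stands in for the resolved output variable, the same placeholder used in the commented-out line above):

# Sketch of what endBatchLoop() now appends, with output_var == "var_60":
end_loop_str = ""
end_loop_str += "\nuint32_t* labels = readLabelsBatch2(labels_path.c_str(),start,end); \n"
end_loop_str += "\nfloat accuracy = computeAccuracy3(labels, batch_size, var_60); \n"

# Emitted C for each batch iteration:
#   uint32_t* labels = readLabelsBatch2(labels_path.c_str(), start, end);
#   float accuracy = computeAccuracy3(labels, batch_size, var_60);
print(end_loop_str)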
@@ -637,7 +637,8 @@ class HPVMTranslator:
     main_func_str += "hpvm_request_tensor(result, 0); \n\n"
     main_func_str += "__visc__cleanup(); \n "
-    main_func_str += "computeAccuracy2(labels, " + str(len(test_data)) + ", result); \n"
+    #main_func_str += "computeAccuracy2(labels, " + str(len(test_data)) + ", result); \n"
+    main_func_str += "computeAccuracy3(labels, " + str(len(test_data)) + ", result); \n"
     main_func_str += "return 0; \n\n"
     main_func_str += "} \n"
@@ -1131,11 +1131,14 @@ class PromiseRtTranslator:
   def endBatchLoop(self):
     end_loop_str = ""
-    end_loop_str += "\nuint8_t* labels = readLabelsBatch(labels_path.c_str(),start,end); \n"
+    #end_loop_str += "\nuint8_t* labels = readLabelsBatch2(labels_path.c_str(),start,end); \n"
+    end_loop_str += "\nuint32_t* labels = readLabelsBatch2(labels_path.c_str(),start,end); \n"
     last_node = self.dfg.last_node
     output_var = self.output_map[last_node.layer_name]
-    accuracy_call = "\nfloat accuracy = computeAccuracy2(labels, batch_size, " + output_var + "); \n"
+    #accuracy_call = "\nfloat accuracy = computeAccuracy2(labels, batch_size, " + output_var + "); \n"
+    accuracy_call = "\nfloat accuracy = computeAccuracy3(labels, batch_size, " + output_var + "); \n"
     end_loop_str += accuracy_call
     #end_loop_str += "float accuracy = computeAccuracy2(labels, batch_size, var_60); "
@@ -5,16 +5,27 @@
 def nodeHasBias(cur_node):
   if cur_node.layer_type == "Conv2D" or cur_node.layer_type == "DepthwiseConv2D" or cur_node.layer_type == "Dense":
+    #return True
+    return cur_node.use_bias
+  else:
+    return False
+
+def layerHasActivationAttr(cur_node):
+  if cur_node.layer_type == "Conv2D" or cur_node.layer_type == "DepthwiseConv2D" \
+     or cur_node.layer_type == "Dense" or cur_node.layer_type == "Activation":
     return True
   else:
     return False
 
 def nodeHasActivation(cur_node):
   if cur_node.layer_type == "Conv2D" or cur_node.layer_type == "DepthwiseConv2D" \
      or cur_node.layer_type == "Dense" or cur_node.layer_type == "Activation":
-    return True
+    #return True
+    return cur_node.activation_type != "linear"
   else:
     return False
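The two predicates now consult the layer's configuration instead of returning True unconditionally, so bias-add and activation calls are emitted only for layers that actually carry them. A small check with the helpers above in scope (the Node class is a hypothetical stand-in exposing just the attributes the helpers read):

# Hypothetical stand-in for a DFG node; only the attributes that
# nodeHasBias/nodeHasActivation inspect are modeled here.
class Node:
  def __init__(self, layer_type, use_bias, activation_type):
    self.layer_type = layer_type
    self.use_bias = use_bias
    self.activation_type = activation_type

conv = Node("Conv2D", use_bias=False, activation_type="linear")
dense = Node("Dense", use_bias=True, activation_type="relu")

print(nodeHasBias(conv), nodeHasActivation(conv))    # False False -> no fused calls
print(nodeHasBias(dense), nodeHasActivation(dense))  # True True   -> bias + relu emitted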
@@ -12,9 +12,11 @@ def dumpLabels(file_name, Y_test):
   for label in Y_test:
     label_val = 0
     if len(Y_test.shape) > 1:
-      label_val = np.int8(label[0])
+      #label_val = np.int8(label[0])
+      label_val = np.int32(label[0])
     else:
-      label_val = np.int8(label)
+      #label_val = np.int8(label)
+      label_val = np.int32(label)
     if label_val not in labels_map:
       labels_map[label_val] = 0
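A minimal round-trip sketch of the new on-disk format, assuming the labels are serialized with numpy's tofile (the write call itself falls outside the hunk shown). Note the Python side writes int32 while the generated C declares uint32_t*; for non-negative class ids the four-byte values agree:

import numpy as np

# Write labels the way dumpLabels now sizes them: 4 bytes each.
Y_test = np.array([[3], [999], [41]])                  # 2-D, as handled above
labels = np.array([np.int32(l[0]) for l in Y_test], dtype=np.int32)
labels.tofile("labels.bin")

# readLabels2 on the C side reads the same bytes as uint32_t;
# the equivalent Python check:
print(np.fromfile("labels.bin", dtype=np.uint32))      # [  3 999  41]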