llvm / hpvm-release · Commit e841d95c

Authored 4 years ago by Hashim Sharif
Committed 4 years ago by Yifan Zhao

Removing some dead code from tensorUtils.h
Parent: f76a605d

Showing 1 changed file with 5 additions and 178 deletions.

hpvm/projects/hpvm-tensor-rt/tensor_runtime/include/tensorUtils.h  (+5, −178)
@@ -148,7 +148,7 @@ void printTensorDims(void *tensor_ptr) {
   struct Tensor *tensor = (struct Tensor *)tensor_ptr;
   printf("Num_elems = %lu \n", tensor->num_elems);
-  for (int i = 0; i < tensor->dims.num_dims; i++) {
+  for (unsigned int i = 0; i < tensor->dims.num_dims; i++) {
     printf("dim[%d] = %lu \n", i, tensor->dims.dim_sizes[i]);
   }
 }
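
The only change in this hunk switches the loop index from int to unsigned int, which avoids comparing a signed index against what is presumably an unsigned num_dims field. A minimal, self-contained sketch of the warning this silences, using a simplified stand-in struct (hypothetical names, not the HPVM definitions):

// Simplified stand-in for the tensor dimension struct; names are illustrative.
#include <cstdio>

struct Dims {
  unsigned int num_dims;        // assumed unsigned, which is why the index changes
  unsigned long dim_sizes[4];
};

int main() {
  Dims dims = {4, {1, 3, 224, 224}};
  // With `int i`, `i < dims.num_dims` triggers -Wsign-compare under -Wall -Wextra;
  // an unsigned index compares cleanly.
  for (unsigned int i = 0; i < dims.num_dims; i++)
    printf("dim[%u] = %lu \n", i, dims.dim_sizes[i]);
  return 0;
}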
@@ -188,79 +188,8 @@ void compareValues(void *tensor_ptr, float *data, size_t num_elems) {
 }
 
-/*
-void *readInputTensor(const char *file_name, int data_type,
-                      int dim1_size, int dim2_size, int dim3_size, int dim4_size) {
-
-  long int type_size = 4; // NOTE: Assuming floating point tensors
-  long int num_elems = dim1_size * dim2_size * dim3_size * dim4_size;
-  long int size_in_bytes = type_size * dim1_size * dim2_size * dim3_size * dim4_size;
-  uint8_t *file_data = (uint8_t *)malloc(sizeof(char) * num_elems);
-  float *tensor_data = (float *)malloc(sizeof(float) * num_elems);
-  long int file_header_size = 16;
-
-  FILE *file = fopen(file_name, "rb");
-  if (file == NULL) {
-    printf("Data file %s is not found. Aborting... \n", file_name);
-    abort();
-  }
-
-  fseek(file, file_header_size, SEEK_CUR); // Skipping the file header
-  size_t bytes_read = fread(file_data, 1, sizeof(uint8_t) * num_elems, file);
-
-  fclose(file);
-
-  for (size_t i = 0; i < num_elems; ++i) {
-    tensor_data[i] = (float)file_data[i] / 255.0f;
-  }
-
-  // NOTE: Using NCHW format
-  struct Tensor *input = (struct Tensor *)create4DTensor(
-      data_type, nchw, dim1_size, dim2_size, dim3_size, dim4_size);
-  initTensorData(input, tensor_data, size_in_bytes);
-  // compareValues(input, tensor_data, num_elems);
-
-  return input;
-}
-*/
-
-//*** FIXIT: Move this to CPU-only
-struct Tensor *readTrainedWeightsCPU(const char *file_name, int data_type,
-                                     int dim1_size, int dim2_size,
-                                     int dim3_size, int dim4_size) {
-
-  // FIXIT: Don't assume floating point types
-  int type_size = 4; // NOTE: Assuming floating point tensors
-  long int num_elems = dim1_size * dim2_size * dim3_size * dim4_size;
-  long int size_in_bytes =
-      type_size * dim1_size * dim2_size * dim3_size * dim4_size;
-  float *tensor_data = (float *)malloc(sizeof(float) * num_elems);
-  int file_header_size = 0;
-
-  FILE *file = fopen(file_name, "rb");
-  if (file == NULL) {
-    printf("Data file %s is not found. Aborting... \n", file_name);
-    abort();
-  }
-
-  fseek(file, file_header_size, SEEK_CUR); // Skipping the file header
-  size_t bytes_read = fread(tensor_data, 1, size_in_bytes, file);
-
-  printf("size in bytes = %lu, bytes read = %lu \n", size_in_bytes, bytes_read);
-
-  fclose(file);
-
-  struct Tensor *weights = (struct Tensor *)create4DTensor(
-      data_type, nchw, dim1_size, dim2_size, dim3_size, dim4_size);
-  initTensorData(weights, tensor_data, size_in_bytes);
-  // compareValues(weights, tensor_data, num_elems);
-  free(tensor_data);
-
-  return weights;
-}
-
 struct Tensor *readTrainedWeights(const char *file_name, int data_type,
                                   long int dim1_size, long int dim2_size,
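
The bulk of this hunk deletes the commented-out readInputTensor and the CPU-only readTrainedWeightsCPU helper, leaving the readTrainedWeights declaration (visible as trailing context) as the surviving reader. For reference, a hedged, self-contained sketch of the same header-less raw-float read the deleted helper performed, using std::vector instead of malloc/free; the file name and dimensions are illustrative, and the HPVM-specific create4DTensor/initTensorData steps are omitted:

#include <cstdio>
#include <cstdlib>
#include <vector>

// Read num_elems raw floats from a header-less binary file, as the deleted
// readTrainedWeightsCPU did, but into an RAII-managed buffer.
std::vector<float> readRawFloats(const char *file_name, long num_elems) {
  std::vector<float> data(num_elems);
  FILE *file = fopen(file_name, "rb");
  if (file == NULL) {
    printf("Data file %s is not found. Aborting... \n", file_name);
    abort();
  }
  size_t elems_read = fread(data.data(), sizeof(float), num_elems, file);
  printf("elems requested = %ld, elems read = %zu \n", num_elems, elems_read);
  fclose(file);
  return data;
}

int main() {
  // e.g. a 64x3x3x3 convolution weight blob (illustrative dimensions)
  std::vector<float> weights = readRawFloats("conv1.bin", 64L * 3 * 3 * 3);
  return weights.empty() ? 1 : 0;
}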
@@ -368,27 +297,6 @@ uint32_t *readLabels3(const char *labels_file, int num_labels) {
   return labels;
 }
 
-uint8_t *readLabelsBatch(const char *labels_file, int start, int end) {
-
-  int num_labels = end - start;
-  int file_header_size = sizeof(uint8_t) * start;
-
-  uint8_t *labels = (uint8_t *)malloc(sizeof(uint8_t) * num_labels);
-  FILE *file = fopen(labels_file, "rb");
-  if (file == NULL) {
-    printf("Data file %s is not found. Aborting... \n", labels_file);
-    abort();
-  }
-
-  fseek(file, file_header_size, SEEK_SET); // Skipping the file header
-
-  size_t bytes_read = fread(labels, 1, sizeof(uint8_t) * num_labels, file);
-
-  fclose(file);
-
-  // printf("--labels bytes_read = %lu \n", bytes_read);
-  return labels;
-}
-
 uint32_t *readLabelsBatch3(const char *labels_file, int start, int end) {
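
The deleted readLabelsBatch read the byte labels for a batch slice [start, end) by seeking start bytes into the file. A hedged sketch of that slice-read pattern, written here for the 4-byte labels that the surviving readLabelsBatch3 handles; since that function's body is not shown in this hunk, the code below is an assumption about the pattern rather than a copy of it:

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <vector>

// Read labels [start, end) from a flat binary file of uint32_t labels.
std::vector<uint32_t> readLabelSlice(const char *labels_file, int start, int end) {
  int num_labels = end - start;
  std::vector<uint32_t> labels(num_labels);
  FILE *file = fopen(labels_file, "rb");
  if (file == NULL) {
    printf("Data file %s is not found. Aborting... \n", labels_file);
    abort();
  }
  // Skip the first `start` labels, then read the slice in one call.
  fseek(file, (long)(sizeof(uint32_t) * start), SEEK_SET);
  size_t labels_read = fread(labels.data(), sizeof(uint32_t), num_labels, file);
  fclose(file);
  printf("labels requested = %d, labels read = %zu \n", num_labels, labels_read);
  return labels;
}

A caller would fetch, for example, readLabelSlice("test_labels.bin", 0, 500) for the first 500 labels (file name illustrative).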
@@ -412,88 +320,6 @@ uint32_t *readLabelsBatch3(const char *labels_file, int start, int end) {
 }
 
-/*void computeAccuracy(const char *labels_file, int num_labels,
-                       void *result_ptr) {
-
-  struct Tensor *result = (struct Tensor *)result_ptr;
-
-  uint8_t *labels = readLabels(labels_file, num_labels);
-  size_t batch_dim = result->dims.dim_sizes[0];
-  size_t channels = result->dims.dim_sizes[1];
-  float *data = (float *)result->host_data;
-  int num_errors = 0;
-
-  for (int i = 0; i < batch_dim; i++) {
-
-    int chosen = 0;
-    for (int id = 1; id < 10; ++id) {
-      if (data[i * channels + chosen] < data[i * channels + id])
-        chosen = id;
-    }
-
-    // printf("chosen = %d, label = %d \n", chosen, labels[i]);
-    if (chosen != labels[i])
-      num_errors++;
-  }
-
-  float accuracy = ((batch_dim - num_errors) * 1.0 / batch_dim * 1.0) * 100.0;
-  printf("****** Accuracy = %f \n\n", accuracy);
-
-  FILE *fp = fopen("final_accuracy", "w+");
-  if (fp != NULL) {
-    std::ostringstream ss;
-    ss << std::fixed << accuracy;
-    std::string print_str = ss.str();
-
-    fwrite(print_str.c_str(), 1, print_str.length(), fp);
-    fclose(fp);
-  }
-}
-*/
-
-// NOTE: batch_size and num_classes are Unused arguments
-float computeAccuracy2(uint8_t *labels, int batch_size, void *result_ptr,
-                       size_t num_classes = 10) {
-
-  struct Tensor *result = (struct Tensor *)result_ptr;
-
-  size_t batch_dim = result->dims.dim_sizes[0];
-  num_classes = result->dims.dim_sizes[1];
-  float *data = (float *)result->host_data;
-  int num_errors = 0;
-
-  printf("batch_dim = %lu, channels = %lu \n", batch_dim, num_classes);
-
-  for (unsigned int i = 0; i < batch_dim; i++) {
-
-    int chosen = 0;
-    for (unsigned int id = 1; id < num_classes; ++id) {
-      if (data[i * num_classes + chosen] < data[i * num_classes + id])
-        chosen = id;
-    }
-
-    if (chosen != labels[i])
-      num_errors++;
-  }
-
-  float accuracy = ((batch_dim - num_errors) * 1.0 / batch_dim * 1.0) * 100.0;
-  printf("****** Accuracy = %f \n\n", accuracy);
-
-  FILE *fp = fopen("final_accuracy", "w+");
-  if (fp != NULL) {
-    std::ostringstream ss;
-    ss << std::fixed << accuracy;
-    std::string print_str = ss.str();
-
-    fwrite(print_str.c_str(), 1, print_str.length(), fp);
-  }
-  fclose(fp);
-
-  return accuracy;
-}
-
 float computeAccuracy3(uint32_t *labels, void *result_ptr) {
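
Both deleted routines (the commented-out computeAccuracy and computeAccuracy2) implement the same top-1 accuracy computation: an argmax over each row of class scores, compared against the label. A hedged, self-contained sketch of that computation over plain arrays, with illustrative names standing in for the HPVM Tensor fields:

#include <cstdint>
#include <cstdio>

// Top-1 accuracy in percent: for each sample, take the argmax over its row of
// class scores and count a miss when it differs from the label.
float top1Accuracy(const float *scores, const uint8_t *labels,
                   size_t batch_dim, size_t num_classes) {
  size_t num_errors = 0;
  for (size_t i = 0; i < batch_dim; i++) {
    size_t chosen = 0;
    for (size_t id = 1; id < num_classes; ++id)
      if (scores[i * num_classes + chosen] < scores[i * num_classes + id])
        chosen = id;                       // running argmax over the row
    if (chosen != labels[i])
      num_errors++;
  }
  return (batch_dim - num_errors) * 100.0f / batch_dim;
}

int main() {
  float scores[2 * 3] = {0.1f, 0.7f, 0.2f, 0.5f, 0.3f, 0.2f};
  uint8_t labels[2] = {1, 2};              // second sample is mispredicted
  printf("accuracy = %f \n", top1Accuracy(scores, labels, 2, 3)); // 50.000000
  return 0;
}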
@@ -509,7 +335,7 @@ float computeAccuracy3(uint32_t *labels, void *result_ptr) {
   for (unsigned int i = 0; i < batch_dim; i++) {
 
     int chosen = 0;
-    for (int id = 1; id < num_classes; ++id) {
+    for (unsigned int id = 1; id < num_classes; ++id) {
       if (data[i * num_classes + chosen] < data[i * num_classes + id])
         chosen = id;
     }
@@ -557,10 +383,10 @@ float computeTop5Accuracy(uint8_t *labels, int num_labels, void *result_ptr,
   printf("batch_dim = %lu, channels = %lu \n", batch_dim, channels);
 
-  for (int i = 0; i < num_labels; i++) {
+  for (unsigned int i = 0; i < num_labels; i++) {
 
     std::vector<ClassProb> elem_probs;
-    for (int id = 0; id < num_classes; ++id) {
+    for (unsigned int id = 0; id < num_classes; ++id) {
       ClassProb cProb;
       cProb.prob = data[i * channels + id];
       cProb.index = id;
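
computeTop5Accuracy gathers each sample's class scores into a vector<ClassProb> (with .prob and .index fields) before picking the five highest. A hedged sketch of such a top-5 membership check using std::partial_sort; the ClassProb definition and the descending sort criterion are assumptions, since neither appears in the visible hunk:

#include <algorithm>
#include <cstdio>
#include <vector>

struct ClassProb {   // assumed layout: only .prob and .index are visible above
  float prob;
  int index;
};

// True when `label` is among the 5 highest-probability entries.
bool inTop5(const std::vector<ClassProb> &elem_probs, int label) {
  std::vector<ClassProb> probs = elem_probs;       // work on a copy
  size_t k = std::min<size_t>(5, probs.size());
  std::partial_sort(probs.begin(), probs.begin() + k, probs.end(),
                    [](const ClassProb &a, const ClassProb &b) {
                      return a.prob > b.prob;      // highest probability first
                    });
  for (size_t j = 0; j < k; ++j)
    if (probs[j].index == label)
      return true;
  return false;
}

int main() {
  std::vector<ClassProb> probs = {{0.05f, 0}, {0.40f, 1}, {0.10f, 2},
                                  {0.20f, 3}, {0.15f, 4}, {0.10f, 5}};
  printf("label 5 in top-5: %d \n", inTop5(probs, 5)); // prints 1
  return 0;
}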
@@ -643,6 +469,7 @@ void dumpPSNRStd(float psnr_std) {
   fclose(fp);
 }
 
 void dumpExecutionAccuracies() {
 
   FILE *fp = fopen("run_accuracies.txt", "w+");