diff --git a/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/functional/broadcast.h b/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/functional/broadcast.h
index 52b70e08ff6cf601c44acbdc07132ada2f629c58..bc85495e209eb8dcd53d8d5b1cc52bc3c4ddcf4e 100644
--- a/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/functional/broadcast.h
+++ b/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/functional/broadcast.h
@@ -5,6 +5,7 @@
 
 #include "common.h"
 #include "tensor.h"
+#include "debug.h"
 
 // TODO: don't accept N == 1
 template <size_t N, typename std::enable_if<N >= 1, int>::type = 0>
@@ -17,7 +18,7 @@ public:
       Tensor *t = tensors[i];
       this->sizes[i] = ::sizes(t);
       if (this->in_dims != t->dims.num_dims)
-        throw std::runtime_error("Broadcast tensors have different dimensions");
+        ERROR("Broadcast tensors have different dimensions\n");
       this->tail_stride[i] = 1;
     }
     fill_broadcast_dims();
@@ -37,8 +38,7 @@ private:
     // Assume all this->in_dims are 1, and compute
     // out_dims is reverse-constructed
     if (this->in_dims < 1)
-      throw std::runtime_error(
-          "Broadcast tensors should have at least 1 dimension");
+      ERROR("Broadcast tensors should have at least 1 dimension\n");
     bool broadcast_ended[N]{false};
     this->out_sizes.resize(this->in_dims, 1);
     for (long i = this->in_dims - 1; i >= 0; i--) {
@@ -48,7 +48,7 @@ private:
         if (this_size == 1)
           continue;
         if (this->out_sizes[i] != 1 && this->out_sizes[i] != this_size)
-          throw std::runtime_error("Dimension size mismatch");
+          ERROR("Dimension size mismatch\n");
         this->out_sizes[i] = this_size;
       }
     }
@@ -62,7 +62,7 @@ private:
           continue;
         }
         if (this->out_sizes[i] != this_size && broadcast_ended[j])
-          throw std::runtime_error("Broadcast dims must be continuous");
+          ERROR("Broadcast dims must be continuous\n");
         else
           tail_stride[j] *= this->out_sizes[i];
       }
diff --git a/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/functional/common.h b/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/functional/common.h
index 0b4d7d056550c6f7f32dbc2c276388328fce6e60..6a20ff83c2ae2bc0e264a7cc7559b89524c6b8b7 100644
--- a/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/functional/common.h
+++ b/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/functional/common.h
@@ -27,7 +27,7 @@ template <typename T> class HFProfileGuard {
     if (typeid(T) == typeid(half) || typeid(T) == typeid(half2))
       return end ? "F2H_end" : "F2H_start";
     else
-      throw std::runtime_error("Type not accepted");
+      ERROR("Type not accepted\n");
   }
 
   static bool needProfiling() {
@@ -56,7 +56,7 @@ template <typename T> int getTensorType() {
   else if (typeid(T) == typeid(half2))
     return (int)half2_type;
   else
-    throw std::runtime_error("Unsupported type!");
+    ERROR("Unsupported type!\n");
 }
 
 template <typename T> T *convertAndGetGPUData(Tensor *t);
diff --git a/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/functional/map.cuh b/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/functional/map.cuh
index 7727ffc34962a338e69f08b626ad504f2071244b..525afa9529fc25f9b3d7c3082980a5582771ef67 100644
--- a/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/functional/map.cuh
+++ b/llvm/projects/hpvm-tensor-rt/tensor_runtime/include/functional/map.cuh
@@ -16,7 +16,7 @@
 template <size_t N> void mapPrecheck(const std::array<Tensor *, N> &srcs) {
   for (Tensor *src : srcs) {
     if (src->dims.num_dims != 4 || src->data_format != CUDNN_TENSOR_NCHW)
-      throw std::runtime_error("Not supported"); // TODO: support this
+      ERROR("Not supported\n"); // TODO: support this
   }
 }
 
diff --git a/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/common.cpp b/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/common.cpp
index a35fd3eae58ac708ea10386416072736ddd98d91..08607a90796836d2218c53355a142c9c1e11cf6f 100644
--- a/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/common.cpp
+++ b/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/common.cpp
@@ -17,9 +17,8 @@ template <> float *convertAndGetGPUData<float>(Tensor *t) {
     t->data_type = float_type;
     return static_cast<float *>(t->gpu_data);
   }
-  throw std::runtime_error(
-      "Type " + std::to_string(t->cur_type) +
-      " is incompatible with target type float");
+  ERROR("Type %s is incompatible with target type float\n",
+        std::to_string(t->cur_type));
 }
 
 template <> half *convertAndGetGPUData<half>(Tensor *t) {
@@ -30,9 +29,8 @@ template <> half *convertAndGetGPUData<half>(Tensor *t) {
     t->data_type = half_type;
     return static_cast<half *>(t->gpu_half_data);
   }
-  throw std::runtime_error(
-      "Type " + std::to_string(t->cur_type) +
-      " is incompatible with target type half");
+  ERROR("Type %s is incompatible with target type half\n",
+         std::to_string(t->cur_type));
 }
 
 template <> float2 *convertAndGetGPUData<float2>(Tensor *t) {
@@ -46,9 +44,8 @@ template <> float2 *convertAndGetGPUData<float2>(Tensor *t) {
     t->cur_type = t->data_type = float2_type;
     return static_cast<float2 *>(t->gpu_data);
   }
-  throw std::runtime_error(
-      "Type " + std::to_string(t->cur_type) +
-      " is incompatible with target type float2");
+  ERROR("Type %s is incompatible with target type float2\n",
+        std::to_string(t->cur_type));
 }
 
 template <> half2 *convertAndGetGPUData<half2>(Tensor *t) {
@@ -62,9 +59,8 @@ template <> half2 *convertAndGetGPUData<half2>(Tensor *t) {
     t->cur_type = t->data_type = half2_type;
     return static_cast<half2 *>(t->gpu_half_data);
   }
-  throw std::runtime_error(
-      "Type " + std::to_string(t->cur_type) +
-      " is incompatible with target type half2");
+  ERROR("Type %s is incompatible with target type half2\n",
+        std::to_string(t->cur_type));
 }
 
 void convertToFloat2Offline(Tensor *t) {
@@ -77,9 +73,8 @@ void convertToFloat2Offline(Tensor *t) {
     t->num_elems /= 2;
     t->cur_type = t->data_type = float2_type;
   } else {
-    throw std::runtime_error(
-        "Type " + std::to_string(t->cur_type) +
-        " is incompatible with target type half2");
+    ERROR("Type %s is incompatible with target type half2\n",
+          std::to_string(t->cur_type));
   }
 }
 
@@ -108,7 +103,7 @@ static Tensor_type_t toHalfType(Tensor_type_t float_ty) {
   case half2_type:
     return float_ty;
   default:
-    throw std::runtime_error("Types not acceptable");
+    ERROR("Types not acceptable\n");
   }
 }
 
@@ -122,7 +117,7 @@ static Tensor_type_t toFloatType(Tensor_type_t half_ty) {
   case float2_type:
     return half_ty;
   default:
-    throw std::runtime_error("Types not acceptable");
+    ERROR("Types not acceptable\n");
   }
 }
 
@@ -137,5 +132,5 @@ Tensor_type_t getCompatibleType(int t1, int t2, bool get_half) {
     return get_half ? half_type : float_type;
   if (type1 == half2_type && type2 == float2_type)
     return get_half ? half2_type : float2_type;
-  throw std::runtime_error("Types not acceptable");
+  ERROR("Types not acceptable\n");
 }
diff --git a/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/debug.cpp b/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/debug.cpp
index 467336dd411ebc8d805bccf3430b74be98f4fec0..8e163e7049fbe317624e934504d7dc9297032983 100644
--- a/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/debug.cpp
+++ b/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/debug.cpp
@@ -16,7 +16,7 @@ void throwError(const char *file, int line, const char *fmt, ...) {
         snprintf(msg + n, 2048 - n, " at %s:%d", file, line);
     }
 
-    throw std::runtime_error(msg);
+    ERROR("%s", msg);
 }
 
 template<typename T, typename F>
diff --git a/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/device_math.cu b/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/device_math.cu
index 41be0d65a46214daffac44719140a8faca505946..002c14b3c72a8f9947195f2dd5f930f4868a6708 100644
--- a/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/device_math.cu
+++ b/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/device_math.cu
@@ -118,7 +118,7 @@ template <> void *mathOpToFunc<float2>(MathOp op) {
   switch (op) {
     CASE_FUNC(Mul, f2mul)
   default:
-    throw std::runtime_error("Float2 function not found");
+    ERROR("Float2 function not found\n");
   }
 }
 
@@ -126,7 +126,7 @@ template <> void *mathOpToFunc<half2>(MathOp op) {
   switch (op) {
     CASE_FUNC(Mul, h2mul)
   default:
-    throw std::runtime_error("Half2 function not found");
+    ERROR("Half2 function not found\n");
   }
 }
 
@@ -146,7 +146,7 @@ template <> void *mathOpToFunc<float>(MathOp op) {
     CASE_FUNC(AddWeighted, addWeighted)
     CASE_FUNC(PSNR, psnr)
   default:
-    throw std::runtime_error("Float function not found");
+    ERROR("Float function not found\n");
   }
 }
 
@@ -163,7 +163,7 @@ template <> void *mathOpToFunc<half>(MathOp op) {
     CASE_FUNC(Blend2, h2blend2)
     CASE_FUNC(AddWeighted, h2addWeighted)
   default:
-    throw std::runtime_error("Half function not found");
+    ERROR("Half function not found\n");
   }
 }
 
@@ -178,6 +178,6 @@ template <> half reduceOpToIdentity<half>(MathOp op) {
   case MathOp::Min:
     return 65504.0f;
   default:
-    throw std::runtime_error("Operator does not have id value");
+    ERROR("Operator does not have id value\n");
   }
 }
diff --git a/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/img_tensor_runtime.cu b/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/img_tensor_runtime.cu
index 2db6a9125a2658b2230dd97b791794ed5956ea24..c2cc1ef5f7a7e86d9265057fd8ff0f17071a705f 100644
--- a/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/img_tensor_runtime.cu
+++ b/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/img_tensor_runtime.cu
@@ -27,7 +27,7 @@ void *tensorFft(void *input, bool inverse) {
   auto *t_input = (Tensor *)input;
   int total_rank = t_input->dims.num_dims;
   if (total_rank != 4)
-    throw std::runtime_error("Only 4-dim tensor supported");
+    ERROR("Only 4-dim tensor supported\n");
   // Dimensions
   size_t *all_dims = t_input->dims.dim_sizes;
   int height = all_dims[2], width = all_dims[3];
@@ -89,7 +89,7 @@ void *tensorFftHalf(void *input, bool inverse) {
   auto *t_input = (Tensor *)input;
   int total_rank = t_input->dims.num_dims;
   if (total_rank != 4)
-    throw std::runtime_error("Only 4-dim tensor supported");
+    ERROR("Only 4-dim tensor supported\n");
   // Dimensions
   size_t *all_dims = t_input->dims.dim_sizes;
   int height = all_dims[2], width = all_dims[3];
@@ -407,9 +407,9 @@ void *tensorReduce(void *input, size_t axis, MathOp func, float skip_ratio) {
   profileEvent("tensorReduce");
   auto *src = (Tensor *)input;
   if (axis >= src->dims.num_dims)
-    throw std::runtime_error("Dimension out of range");
+    ERROR("Dimension out of range\n");
   if (src->dims.num_dims != 4 || src->data_format != CUDNN_TENSOR_NCHW)
-    throw std::runtime_error("Not supported");
+    ERROR("Not supported\n");
   Tensor *ret = reduceDim<float>(src, 0.0f, func, axis, skip_ratio);
   profileEvent("tensorReduce_end");
   return ret;
@@ -421,9 +421,9 @@ tensorReduceHalf(void *input, size_t axis, MathOp func, float skip_ratio) {
   profileEvent("#tensorReduce");
   auto *src = (Tensor *)input;
   if (axis >= src->dims.num_dims)
-    throw std::runtime_error("Dimension out of range");
+    ERROR("Dimension out of range\n");
   if (src->dims.num_dims != 4 || src->data_format != CUDNN_TENSOR_NCHW)
-    throw std::runtime_error("Not supported");
+    ERROR("Not supported\n");
   Tensor *ret = reduceDim<half>(src, 0.0f, func, axis, skip_ratio);
   profileEvent("H2F_start");
   convertToFP32_offline(ret);
@@ -470,7 +470,7 @@ void *tensorMap2(MathOp f2, void *i1, void *i2) {
   else if (common_ty == float2_type)
     ret = mapGeneral<float2, 2>(f2, {src1, src2});
   else
-    throw std::runtime_error("Type not recognized");
+    ERROR("Type not recognized\n");
   profileEvent("tensorMap2_end");
   return ret;
 }
@@ -498,7 +498,7 @@ void *tensorMap2Half(MathOp f2, void *i1, void *i2) {
     return ret;
   }
   else
-    throw std::runtime_error("Type not recognized");
+    ERROR("Type not recognized\n");
 }
 
 void *tensorMap3(MathOp f3, void *i1, void *i2, void *i3) {
diff --git a/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/img_tensor_utils.cpp b/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/img_tensor_utils.cpp
index 07f2a5e692cebc563dba48ba80afbcbcf84bfb6c..bdce67ddedc24e7192b98bd3beff6aaec942a2d7 100644
--- a/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/img_tensor_utils.cpp
+++ b/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/img_tensor_utils.cpp
@@ -47,8 +47,7 @@ static Tensor *to_nhwc(Tensor *t) {
     DEBUG("Tensor already in NHWC format, no conversion needed\n");
     return t;
   } else if (t->data_format != CUDNN_TENSOR_NCHW) {
-    throw std::runtime_error(
-        "Unknown tensor format: " + std::to_string(t->data_format));
+    ERROR("Unknown tensor format: %s\n", std::to_string(t->data_format));
   } else {
     DEBUG("Converting to NHWC format\n");
   }
@@ -79,8 +78,7 @@ static Tensor *to_nchw(Tensor *t) {
     DEBUG("Tensor already in NCHW format, no conversion needed\n");
     return t;
   } else if (t->data_format != CUDNN_TENSOR_NHWC) {
-    throw std::runtime_error(
-        "Unknown tensor format: " + std::to_string(t->data_format));
+    ERROR("Unknown tensor format: %s\n", std::to_string(t->data_format));
   } else {
     DEBUG("Converting to NCHW format\n");
   }
@@ -150,12 +148,12 @@ readDataSet(const char *path, size_t start, size_t count, size_t n_color) {
     int x, y, n; // x = width, y = height, n = # 8-bit components per pixel
     uint8_t *data = stbi_load(path.c_str(), &x, &y, &n, n_color);
     if (data == nullptr)
-      throw std::runtime_error("Image load failed");
+      ERROR("Image load failed\n");
     if (x != h || y != w) {
       std::ostringstream os;
       os << "Image file " << path << " have different shape (" << x << ", " << y
          << ")";
-      throw std::runtime_error(os.str());
+      ERROR("%s\n", os.str());
     }
     float *converted = uint8_to_float(data, n_floats);
     stbi_image_free(data);
@@ -223,7 +221,7 @@ void saveDataSet(
 
     uint8_t *ldr_data = float_to_uint8(base_data, h * w * c);
     if (!stbi_write_png(name.c_str(), w, h, c, ldr_data, 0))
-      throw std::runtime_error("Write file failed");
+      ERROR("Write file failed\n");
     delete[] ldr_data;
 
     base_data += h * w * c;
@@ -235,7 +233,7 @@ void *loadAsImage(const char *filename, size_t n_color) {
   int x, y, n; // x = width, y = height, n = # 8-bit components per pixel
   uint8_t *data = stbi_load(filename, &x, &y, &n, n_color);
   if (data == nullptr)
-    throw std::runtime_error("Image load failed");
+    ERROR("Image load failed\n");
   float *converted = uint8_to_float(data, x * y * n);
   DEBUG("Loading shape: (1, %lu, %lu, %lu)(NHWC)\n", y, x, n_color);
   auto *image =
@@ -271,7 +269,7 @@ void *createFilterFromData(
   if (data_type == CUDNN_DATA_HALF || data_type == CUDNN_DATA_FLOAT)
     tensor_data = (char *)tensor->host_data;
   else {
-    throw std::runtime_error("Data type unsupported as filter");
+    ERROR("Data type unsupported as filter\n");
   }
   size_t channel_sz = tensor->size_in_bytes / n_chan;
   for (size_t i = 0; i < n_chan; i++, tensor_data += channel_sz) {
@@ -428,7 +426,7 @@ void reshape(void *t, const std::vector<size_t> &shape) {
   auto *tensor = (Tensor *)t;
   size_t in_n = num_elems(tensor), out_n = num_elems(shape);
   if (in_n != out_n)
-    throw std::runtime_error("Reshaping cannot change number of elements");
+    ERROR("Reshaping cannot change number of elements\n");
   tensor->dims.num_dims = shape.size();
   free(tensor->dims.dim_sizes);
   tensor->dims.dim_sizes = (size_t *)malloc(sizeof(size_t) * shape.size());
diff --git a/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/tensor_utils.cu b/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/tensor_utils.cu
index b68f7571cf04b79ce45da94418a3c933ba3af432..74a87faa2d13839b75f5d57d564fd31d0d45e2f9 100644
--- a/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/tensor_utils.cu
+++ b/llvm/projects/hpvm-tensor-rt/tensor_runtime/src/tensor_utils.cu
@@ -70,7 +70,7 @@ int getTypeSize(int data_type){
     case half2_type:
       return 4;
     default:
-      throw std::runtime_error("Unknown type " + std::to_string(data_type));
+      ERROR("Unknown type %s\n", std::to_string(data_type));
   }
 }
 
@@ -88,7 +88,7 @@ static int getFullPrecTypeSize(int data_type){
     case half2_type:
       return 8;
     default:
-      throw std::runtime_error("Unknown type " + data_type);
+      ERROR("Unknown type %d\n", data_type);
   }
 }