diff --git a/hpvm/lib/Transforms/DFG2LLVM_WrapperAPI/DFG2LLVM_WrapperAPI.cpp b/hpvm/lib/Transforms/DFG2LLVM_WrapperAPI/DFG2LLVM_WrapperAPI.cpp
index 3fa6860b9c7cb12b2f76c22196fa744cd052c2bb..ec5a84cffb31b67cfcc0f9efc7a7c2cc3f4be4a1 100644
--- a/hpvm/lib/Transforms/DFG2LLVM_WrapperAPI/DFG2LLVM_WrapperAPI.cpp
+++ b/hpvm/lib/Transforms/DFG2LLVM_WrapperAPI/DFG2LLVM_WrapperAPI.cpp
@@ -49,11 +49,6 @@ using namespace inplacedfg;
 
 namespace {
 
-cl::opt<std::string> QuantizationInputsFilename(
-    "quantization-levels-filename",
-    cl::desc("<PROMISE quantization levels input file (path)>"),
-    cl::value_desc("filename"), cl::Required);
-
 cl::opt<std::string> ConfigurationInputsFilename(
     "configuration-inputs-filename",
     cl::desc("<Autotuner configurations input file (path)>"),
@@ -1241,8 +1236,6 @@ class CGT_WrapperAPI : public CodeGenTraversal {
 private:
   // Member variables
   unsigned nodeID; // Used as a node identifier
-
-  std::string QuantizationInputsFilenameStr;
   std::string ConfigurationInputsFilenameStr;
 
   InPlaceDFGAnalysis::InPlaceDFGParameter *IPP;
@@ -1267,10 +1260,8 @@ public:
   // Constructor
   CGT_WrapperAPI(Module &_M, BuildDFG &_DFG,
                  InPlaceDFGAnalysis::InPlaceDFGParameter &_IPP,
-                 std::string &_QuantizationInputsFilenameStr,
                  std::string &_ConfigurationInputsFilenameStr)
       : CodeGenTraversal(_M, _DFG), IPP(&_IPP),
-        QuantizationInputsFilenameStr(_QuantizationInputsFilenameStr),
         ConfigurationInputsFilenameStr(_ConfigurationInputsFilenameStr) {
     nodeID = 0;
     initRuntimeAPI();
@@ -1314,19 +1305,6 @@ void CGT_WrapperAPI::initRuntimeAPI() {
       ArrayRef<Value *>(ConstantInt::get(Type::getInt32Ty(M.getContext()), 0)),
       "", InitCall);
 
-  StringRef QRangesStrRef = StringRef(QuantizationInputsFilenameStr);
-  // Create string for node name, as first argument for wrapper API call
-  Constant *ConstArray1 =
-      ConstantDataArray::getString(M.getContext(), QRangesStrRef, true);
-  GlobalVariable *GV1 =
-      new GlobalVariable(M, ConstArray1->getType(), true,
-                         GlobalValue::ExternalLinkage, ConstArray1, "");
-  // Create GEP expression to access it
-  Constant *Int_0 = ConstantInt::get(Type::getInt32Ty(M.getContext()), 0);
-  Constant *GEPIndices[] = {Int_0, Int_0};
-  Constant *QRangesGEPConst = ConstantExpr::getGetElementPtr(
-      GV1->getType()->getPointerElementType(), GV1, GEPIndices);
-
   StringRef ConfsStrRef = StringRef(ConfigurationInputsFilenameStr);
   // Create string for node name, as first argument for wrapper API call
   Constant *ConstArray2 =
@@ -1334,10 +1312,12 @@ void CGT_WrapperAPI::initRuntimeAPI() {
   GlobalVariable *GV2 =
       new GlobalVariable(M, ConstArray2->getType(), true,
                          GlobalValue::ExternalLinkage, ConstArray2, "");
+  // Create GEP expression to access it
+  Constant *Int_0 = ConstantInt::get(Type::getInt32Ty(M.getContext()), 0);
+  Constant *GEPIndices[] = {Int_0, Int_0};
   Constant *ConfsGEPConst = ConstantExpr::getGetElementPtr(
       GV2->getType()->getPointerElementType(), GV2, GEPIndices);
-  Value *RTCInitArgs[] = {ConfsGEPConst, QRangesGEPConst};
-  CallInst::Create(llvm_hpvm_initializeRuntimeController, RTCInitArgs, "",
+  CallInst::Create(llvm_hpvm_initializeRuntimeController, {ConfsGEPConst}, "",
                    InitCall);
 
   Function *VC = M.getFunction("llvm.hpvm.cleanup");
@@ -1464,7 +1444,7 @@ bool DFG2LLVM_WrapperAPI::runOnModule(Module &M) {
 
   // Visitor for Code Generation Graph Traversal
   CGT_WrapperAPI *CGTVisitor = new CGT_WrapperAPI(
-      M, DFG, IPP, QuantizationInputsFilename, ConfigurationInputsFilename);
+      M, DFG, IPP, ConfigurationInputsFilename);
 
   // Iterate over all the DFGs and produce code for each one of them
   for (auto rootNode : Roots) {
diff --git a/hpvm/projects/hpvm-tensor-rt/tensor_runtime/include/configuration.h b/hpvm/projects/hpvm-tensor-rt/tensor_runtime/include/configuration.h
index ca2bcd96661d3dc4ba62f3389a0b84d109d43bc9..b4f3d39fae77b214a46301ba7d6c95a5e651c44f 100644
--- a/hpvm/projects/hpvm-tensor-rt/tensor_runtime/include/configuration.h
+++ b/hpvm/projects/hpvm-tensor-rt/tensor_runtime/include/configuration.h
@@ -11,14 +11,12 @@
 // Describes the internal choices made for an ApproxHPVM node
 class NodeConfiguration {
 public:
-  enum NODE_CONFIGURATION_TARGET { PROMISE, GPU, CPU, END };
+  enum NODE_CONFIGURATION_TARGET { GPU, CPU, END };
 
 protected:
   enum NODE_CONFIGURATION_TARGET NODE_CONFIGURATION_TARGET_ID;
 
 public:
-  bool isPROMISENodeConfiguration();
-
   bool isGPUNodeConfiguration();
 
   bool isCPUNodeConfiguration();
@@ -26,27 +24,6 @@ public:
   virtual void print() = 0;
 };
 
-class PROMISENodeConfiguration : public NodeConfiguration {
-public:
-  // Approximation methods available for this HW type
-  enum APPROX { SWING_LEVEL, END };
-
-private:
-  // A vector, containing pairs of approximation method and tunable parameter
-  // (expressed as int, or ignored when not applicable)
-  std::vector<std::pair<enum APPROX, int>> ApproxChoices;
-
-public:
-  void pushNewApproximationChoice(enum APPROX approx, int u);
-
-  std::vector<std::pair<enum APPROX, int>> &getApproxChoices();
-
-  PROMISENodeConfiguration();
-  ~PROMISENodeConfiguration();
-
-  void print() override;
-};
-
 class GPUNodeConfiguration : public NodeConfiguration {
 public:
   // Approximation methods available for this HW type
diff --git a/hpvm/projects/hpvm-tensor-rt/tensor_runtime/include/hpvm-rt-controller.h b/hpvm/projects/hpvm-tensor-rt/tensor_runtime/include/hpvm-rt-controller.h
index 0a207edc51c6bf029d6a5100c8617e8d3e811b31..cbd44313873a5ed79a94a17c54ca4d8e57cf09d4 100644
--- a/hpvm/projects/hpvm-tensor-rt/tensor_runtime/include/hpvm-rt-controller.h
+++ b/hpvm/projects/hpvm-tensor-rt/tensor_runtime/include/hpvm-rt-controller.h
@@ -155,8 +155,6 @@ extern RuntimeController *RC;
 class RuntimeController {
 private:
   // Members
-  // Map from node names to quantization ranges
-  std::map<std::string, std::vector<float>> QuantizationMap;
 
   // Configurations.
   // Configurations initially read - all generated from autotuner
@@ -203,7 +201,6 @@ private:
   void stop_profiler();
 
   void setProfileInfoFilename(const char *);
-  void readQuantizationFile(const char *);
   void readConfigurationFile(const char *);
 
   void computeParetoConfigurationPoints();
@@ -224,8 +221,6 @@ public:
   double getCurrentConfigurationAccuracy();
   double getCurrentConfigurationAccuracyLoss();
 
-  std::vector<float> &getQuantizationRanges(const char *data);
-
   NodeConfiguration *getNodeConfiguration(const char *data);
 
   // Functions for runtime control
@@ -241,7 +236,7 @@ public:
   double getBaselineTime();
   Slowdowns *getSlowdowns();
 
-  void init(const char *Cstr, const char *Qstr);
+  void init(const char *Cstr);
 
   // Exposing functionality of ProfileInfo
   void end_iteration();
@@ -298,7 +293,6 @@ public:
   ~RuntimeController();
 
   // Helper Functions
-  void printQuantizationMap();
   void printConfigurations(std::vector<struct Configuration> &);
   void printConfigurations(std::vector<struct Configuration *> &);
 };
diff --git a/hpvm/projects/hpvm-tensor-rt/tensor_runtime/include/rt-controller-api.h b/hpvm/projects/hpvm-tensor-rt/tensor_runtime/include/rt-controller-api.h
index 28dbf715e7350b496a2cfb6f550e8e3a83865671..f2c732cb2743daebadec4fddc2ad88d799959dbb 100644
--- a/hpvm/projects/hpvm-tensor-rt/tensor_runtime/include/rt-controller-api.h
+++ b/hpvm/projects/hpvm-tensor-rt/tensor_runtime/include/rt-controller-api.h
@@ -1,6 +1,6 @@
 extern "C" {
 // Functions to be inserted with initializeTensorRT and clearTensorRT
-void llvm_hpvm_initializeRuntimeController(const char *, const char *);
+void llvm_hpvm_initializeRuntimeController(const char *);
 void llvm_hpvm_clearRuntimeController();
 void llvm_hpvm_invokeRtControl(void *result, const char *str, int start,
                                int end);
diff --git a/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/configuration.cpp b/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/configuration.cpp
index 7b8865966f03d838b2de1fe06339c4a8620870b1..fd1492fe68e8833ea4cdca4d5df6518b6ec3b37c 100644
--- a/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/configuration.cpp
+++ b/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/configuration.cpp
@@ -10,16 +10,11 @@
 
 #include "configuration.h"
 
-using P_APPROX = PROMISENodeConfiguration::APPROX;
 using G_APPROX = GPUNodeConfiguration::APPROX;
 using C_APPROX = CPUNodeConfiguration::APPROX;
 using G_TENSOR_OP = GPUNodeConfiguration::TENSOR_OP;
 using C_TENSOR_OP = CPUNodeConfiguration::TENSOR_OP;
 
-bool NodeConfiguration::isPROMISENodeConfiguration() {
-  return NODE_CONFIGURATION_TARGET_ID == PROMISE;
-}
-
 bool NodeConfiguration::isGPUNodeConfiguration() {
   return NODE_CONFIGURATION_TARGET_ID == GPU;
 }
@@ -28,22 +23,6 @@ bool NodeConfiguration::isCPUNodeConfiguration() {
   return NODE_CONFIGURATION_TARGET_ID == CPU;
 }
 
-void PROMISENodeConfiguration::pushNewApproximationChoice(
-    P_APPROX approx, int u) {
-  ApproxChoices.push_back(std::make_pair(approx, u));
-}
-
-std::vector<std::pair<P_APPROX, int>> &
-PROMISENodeConfiguration::getApproxChoices() {
-  return ApproxChoices;
-}
-
-PROMISENodeConfiguration::PROMISENodeConfiguration() {
-  NODE_CONFIGURATION_TARGET_ID = PROMISE;
-}
-
-PROMISENodeConfiguration::~PROMISENodeConfiguration() {}
-
 void GPUNodeConfiguration::pushNewTensorOperation(G_TENSOR_OP top) {
   std::vector<std::pair<G_APPROX, int>> emptyVec;
   ApproxChoices.push_back(std::make_pair(top, emptyVec));
@@ -124,26 +103,6 @@ operator()(const struct Configuration *a, const float &b) const {
 
 // Helper configuration print methods
 
-void PROMISENodeConfiguration::print() {
-
-  printf(" promise");
-  for (auto &it : ApproxChoices) {
-    printf(" ");
-    switch (it.first) {
-    case P_APPROX::SWING_LEVEL:
-      printf("swing_level");
-      break;
-      // TODO additional approx methods to be printed here
-    default:
-      ERROR("Unknown approximation option");
-      break;
-    }
-    printf(" %d", it.second);
-  }
-
-  printf("\n");
-}
-
 void GPUNodeConfiguration::print() {
 
   printf(" gpu");
diff --git a/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/hpvm-rt-controller.cpp b/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/hpvm-rt-controller.cpp
index 5b0f0beedb4a13bbe484175ade0e2f5364e7be13..24ba749cb57953cfec2985ef47c37282bf6f0f93 100644
--- a/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/hpvm-rt-controller.cpp
+++ b/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/hpvm-rt-controller.cpp
@@ -420,12 +420,6 @@ double RuntimeController::getCurrentConfigurationAccuracyLoss() {
   return (double) (*Configurations)[configurationIdx]->accuracyLoss;
 }
 
-std::vector<float> &RuntimeController::getQuantizationRanges(const char *data) {
-  std::string s(data);
-  // All nodes are expected to have quantization ranges
-  return QuantizationMap.at(s);
-}
-
 NodeConfiguration *RuntimeController::getNodeConfiguration(const char *data) {
 
   // if visc.node.id Not specified for this HPVM Node
@@ -441,12 +435,10 @@ NodeConfiguration *RuntimeController::getNodeConfiguration(const char *data) {
   
 }
 
-void RuntimeController::init(const char *Cstr, const char *Qstr) {
+void RuntimeController::init(const char *Cstr) {
   // We initialize the path to the profile info output file,
   // based on the path given for the configuration file
   setProfileInfoFilename(Cstr);
-
-  readQuantizationFile(Qstr);
   readConfigurationFile(Cstr);
 
   // NOTE: Configurations is pareto-configs. InitialConfigurations is the full list (config file)
@@ -668,59 +660,6 @@ void RuntimeController::setProfileInfoFilename(const char *str) {
     PI->set_out_file_name(profile_filename);
   }
 }
-void RuntimeController::readQuantizationFile(const char *str) {
-
-  INFO("Reading Quantization Ranges File...\n");
-
-  if (std::string(str).empty()) {
-    INFO("Empty quantization file string.\n");
-    return;
-  }
-
-  std::ifstream qin(str);
-
-  if (!qin) {
-    ERROR("Failed to open PROMISE quantization file.");
-    abort();
-  }
-
-  while (!qin.eof()) {
-    char NodeName[NODE_NAME_BUFFER_SIZE];
-    std::vector<float> QuantRangeVector;
-
-    qin >> NodeName;
-
-    float qrange;
-    for (unsigned i = 0; i < 8; i++) {
-      qin >> qrange;
-      QuantRangeVector.push_back(qrange);
-    }
-    // See if we need to insert this in map instead - my lookup test seemed to
-    // work without it std::string s(NodeName);
-    QuantizationMap.insert(
-        std::pair<std::string, std::vector<float>>(NodeName, QuantRangeVector));
-  }
-
-  qin.close();
-  INFO("DONE.\n");
-}
-
-void RuntimeController::printQuantizationMap() {
-
-  DEBUG("Quantization Ranges Map:\n");
-
-  for (std::map<std::string, std::vector<float>>::const_iterator it =
-           QuantizationMap.begin();
-       it != QuantizationMap.end(); ++it) {
-    DEBUG("%s :", it->first.c_str());
-
-    for (unsigned i = 0; i < it->second.size(); i++) {
-      DEBUG(" %f", it->second[i]);
-    }
-
-    DEBUG("\n");
-  }
-}
 
 void RuntimeController::readConfigurationFile(const char *str) {
 
@@ -790,31 +729,7 @@ void RuntimeController::readConfigurationFile(const char *str) {
       continue;
     }
 
-    if (tokens[1] == "promise") {
-      DEBUG("Found promise configuration\n");
-
-      // There must be at least one approximation option
-      CUSTOM_ASSERT(
-          (tokens.size() >= 2) && "Not enough approximation options.");
-
-      PROMISENodeConfiguration *NodeConf = new PROMISENodeConfiguration();
-      InitialConfigurations.back().setup.insert(
-          std::make_pair(tokens[0], NodeConf));
-
-      // In increments of two, to handle pairs of approx option - tunable
-      // parameter
-      for (unsigned idx = 2; idx < tokens.size(); idx += 2) {
-        if (tokens[idx] == "swing_level") {
-          DEBUG("Found swing voltage option\n");
-          int vswing = std::stoi(tokens[idx + 1]);
-          DEBUG("vswing: %d\n", vswing);
-          NodeConf->pushNewApproximationChoice(
-              PROMISENodeConfiguration::APPROX::SWING_LEVEL, vswing);
-        }
-        // TODO: other approximation options handled here
-      }
-
-    } else if (tokens[1] == "gpu") {
+    if (tokens[1] == "gpu") {
       DEBUG("Found gpu configuration\n");
 
       // There must be at least one operation, with an approximation option
@@ -1483,10 +1398,9 @@ double RuntimeController::getBaselineTime() { return baseline_time; }
 Slowdowns *RuntimeController::getSlowdowns() { return slowdowns; }
 
 // Functions to be inserted with initializeTensorRT and clearTensorRT
-extern "C" void llvm_hpvm_initializeRuntimeController(
-    const char *ConfigFile, const char *QRangeFile) {
+extern "C" void llvm_hpvm_initializeRuntimeController(const char *ConfigFile) {
   RC = new RuntimeController();
-  RC->init(ConfigFile, QRangeFile);
+  RC->init(ConfigFile);
   return;
 }
 
diff --git a/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/wrapper_runtime.cu b/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/wrapper_runtime.cu
index 3f433be855a762028d94d3871abc4d8971507c46..5cdfdf5a55109fac66a89f544306fbe7b4b9562a 100644
--- a/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/wrapper_runtime.cu
+++ b/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/wrapper_runtime.cu
@@ -59,68 +59,7 @@ extern "C"{
 
     NodeConfiguration *NodeConf = RC->getNodeConfiguration(hpvm_node_id);
 
-    if (NodeConf->isPROMISENodeConfiguration()) {
-      DEBUG("PROMISE Configuration for ConvLayer\n");
-      // Mapped to PROMISE - get a PROMISE node configuration
-      PROMISENodeConfiguration *PROMISEConf = (PROMISENodeConfiguration *)NodeConf;
-      std::vector<float> &QRanges = RC->getQuantizationRanges(hpvm_node_id);
-
-      std::vector<std::pair<PROMISENodeConfiguration::APPROX, int> > &approxTuples =
-	PROMISEConf->getApproxChoices();
-
-      if (approxTuples.size() == 1) {
-	enum PROMISENodeConfiguration::APPROX approx = approxTuples[0].first;
-	int param = approxTuples[0].second;
-	if (approx == PROMISENodeConfiguration::APPROX::SWING_LEVEL) {
-	  DEBUG("Approximation choice for ConvLayer: swing level %d\n", param);
-
-	  struct Tensor* input_tensor_cast = (struct Tensor*) input;
-	  struct Tensor* filter_tensor_cast = (struct Tensor*) filter;
-	  std::pair<double, double> pinfo =
-	    RC->conv_profile(input_tensor_cast->dims.dim_sizes[0], //n
-			     input_tensor_cast->dims.dim_sizes[1], //c
-			     input_tensor_cast->dims.dim_sizes[2], //h
-			     input_tensor_cast->dims.dim_sizes[3], //w
-			     filter_tensor_cast->dims.dim_sizes[0], //c_out
-			     filter_tensor_cast->dims.dim_sizes[1], //c_in
-			     filter_tensor_cast->dims.dim_sizes[2], //k_h
-			     filter_tensor_cast->dims.dim_sizes[3], //k_w
-			     conv_stride_h, //s_h
-			     conv_stride_w, //s_w
-			     param, //voltage_swing
-			     filter_tensor_cast->dims.dim_sizes[2] *
-                             filter_tensor_cast->dims.dim_sizes[3] /*patch_factor: k_h*k_w*/);
-	  RC->addToCurrentIterationComputeTime("ConvLayer_PROMISE", pinfo.first);
-	  RC->addToCurrentIterationComputeEnergy("ConvLayer_PROMISE", pinfo.second);
-	  void* t_out;
-	  // FIXIT: The pool_size in second param should be pool_stride
-	  t_out = PROMISE_Conv(input, QRanges[0], QRanges[1],
-			       filter, QRanges[2], QRanges[3],
-			       bias, QRanges[4], QRanges[5],
-			       conv_pad_h, conv_pad_w,
-			       conv_stride_h, conv_stride_w,
-			       pool_id, pool_size, pool_size,
-			       activation_id,
-			       QRanges[6], QRanges[7], param);
-
-	  return t_out;
-	} else {
-	  CUSTOM_ASSERT(false && "Unknown approximation type");
-	  ERROR("Unknown approximation type");
-	  abort();
-	}
-	// TODO additional approx methods implemented here
-
-      } else if (approxTuples.size() == 2) {
-	ERROR("Currently unsupported case");
-	abort();
-      } else {
-	ERROR("Unsupported case");
-	abort();
-      }
-    }
-    else
-      if (NodeConf->isGPUNodeConfiguration()) {
+    if (NodeConf->isGPUNodeConfiguration()) {
 	DEBUG("GPU Configuration for ConvLayer\n");
 	// Mapped to GPU - get a GPU node configuration
 	GPUNodeConfiguration *GPUConf = (GPUNodeConfiguration *)NodeConf;
@@ -271,69 +210,7 @@ extern "C"{
     INFO ("*** Conv Layer \n");
     
     NodeConfiguration *NodeConf = RC->getNodeConfiguration(hpvm_node_id);
-
-    if (NodeConf->isPROMISENodeConfiguration()) {
-      DEBUG("PROMISE Configuration for ConvLayer\n");
-      // Mapped to PROMISE - get a PROMISE node configuration
-      PROMISENodeConfiguration *PROMISEConf = (PROMISENodeConfiguration *)NodeConf;
-      std::vector<float> &QRanges = RC->getQuantizationRanges(hpvm_node_id);
-
-      std::vector<std::pair<PROMISENodeConfiguration::APPROX, int> > &approxTuples =
-	PROMISEConf->getApproxChoices();
-
-      if (approxTuples.size() == 1) {
-	enum PROMISENodeConfiguration::APPROX approx = approxTuples[0].first;
-	int param = approxTuples[0].second;
-	if (approx == PROMISENodeConfiguration::APPROX::SWING_LEVEL) {
-	  DEBUG("Approximation choice for ConvLayer: swing level %d\n", param);
-
-	  struct Tensor* input_tensor_cast = (struct Tensor*) input;
-	  struct Tensor* filter_tensor_cast = (struct Tensor*) filter;
-	  std::pair<double, double> pinfo =
-	    RC->conv_profile(input_tensor_cast->dims.dim_sizes[0], //n
-			     input_tensor_cast->dims.dim_sizes[1], //c
-			     input_tensor_cast->dims.dim_sizes[2], //h
-			     input_tensor_cast->dims.dim_sizes[3], //w
-			     filter_tensor_cast->dims.dim_sizes[0], //c_out
-			     filter_tensor_cast->dims.dim_sizes[1], //c_in
-			     filter_tensor_cast->dims.dim_sizes[2], //k_h
-			     filter_tensor_cast->dims.dim_sizes[3], //k_w
-			     conv_stride_h, //s_h
-			     conv_stride_w, //s_w
-			     param, //voltage_swing
-			     filter_tensor_cast->dims.dim_sizes[2] *
-                             filter_tensor_cast->dims.dim_sizes[3] /*patch_factor: k_h*k_w*/);
-	  RC->addToCurrentIterationComputeTime("ConvLayer_PROMISE", pinfo.first);
-	  RC->addToCurrentIterationComputeEnergy("ConvLayer_PROMISE", pinfo.second);
-	  void* t_out;
-	  // FIXIT: The pool_size in second param should be pool_stride
-	  t_out = PROMISE_Conv(input, QRanges[0], QRanges[1],
-			       filter, QRanges[2], QRanges[3],
-			       bias, QRanges[4], QRanges[5],
-			       conv_pad_h, conv_pad_w,
-			       conv_stride_h, conv_stride_w,
-			       pool_id, pool_size_v, pool_size_h,
-			       activation_id,
-			       QRanges[6], QRanges[7], param);
-
-	  return t_out;
-	} else {
-	  CUSTOM_ASSERT(false && "Unknown approximation type");
-	  ERROR("Unknown approximation type");
-	  abort();
-	}
-	// TODO additional approx methods implemented here
-
-      } else if (approxTuples.size() == 2) {
-	ERROR("Currently unsupported case");
-	abort();
-      } else {
-	ERROR("Unsupported case");
-	abort();
-      }
-    }
-    else
-      if (NodeConf->isGPUNodeConfiguration()) {
+    if (NodeConf->isGPUNodeConfiguration()) {
 	DEBUG("GPU Configuration for ConvLayer\n");
 	// Mapped to GPU - get a GPU node configuration
 	GPUNodeConfiguration *GPUConf = (GPUNodeConfiguration *)NodeConf;
@@ -495,64 +372,7 @@ extern "C"{
     INFO ("*** Dense Layer \n");
     
     NodeConfiguration *NodeConf = RC->getNodeConfiguration(hpvm_node_id);
-
-    if (NodeConf->isPROMISENodeConfiguration()) {
-      DEBUG("PROMISE Configuration for FCLayer\n");
-      // Mapped to PROMISE - get a PROMISE node configuration
-      PROMISENodeConfiguration *PROMISEConf = (PROMISENodeConfiguration *)NodeConf;
-      std::vector<float> &QRanges = RC->getQuantizationRanges(hpvm_node_id);
-
-      std::vector<std::pair<PROMISENodeConfiguration::APPROX, int> > &approxTuples =
-	PROMISEConf->getApproxChoices();
-
-      if (approxTuples.size() == 1) {
-	enum PROMISENodeConfiguration::APPROX approx = approxTuples[0].first;
-	int param = approxTuples[0].second;
-	if (approx == PROMISENodeConfiguration::APPROX::SWING_LEVEL) {
-	  DEBUG("Approximation choice for FCLayer: swing level %d\n", param);
-
-	  struct Tensor* input_tensor_cast = (struct Tensor*) input;
-	  struct Tensor* weights_tensor_cast = (struct Tensor*) weights;
-	  CUSTOM_ASSERT((input_tensor_cast->dims.dim_sizes[1] *
-			 input_tensor_cast->dims.dim_sizes[2] *
-			 input_tensor_cast->dims.dim_sizes[3] ==
-                         weights_tensor_cast->dims.dim_sizes[2]) &&
-			"Dimensions for matrix multiplication do not match.");
-	  std::pair<double, double> pinfo =
-	    RC->fc_profile(input_tensor_cast->dims.dim_sizes[0], //num_rows_a,
-			   input_tensor_cast->dims.dim_sizes[1] *
-                           input_tensor_cast->dims.dim_sizes[2] *
-                           input_tensor_cast->dims.dim_sizes[3], //num_cols_a,
-			   weights_tensor_cast->dims.dim_sizes[2], //num_rows_b,
-			   weights_tensor_cast->dims.dim_sizes[3], //num_cols_b,
-			   param, //voltage_swing,
-			   1 /*patch_factor*/);
-	  RC->addToCurrentIterationComputeTime("FCLayer_PROMISE", pinfo.first);
-	  RC->addToCurrentIterationComputeEnergy("FCLayer_PROMISE", pinfo.second);
-	  void* t_out;
-	  t_out = PROMISE_FC(input, QRanges[0], QRanges[1],
-			     weights, QRanges[2], QRanges[3],
-			     bias, QRanges[4], QRanges[5],
-			     activation_id,
-			     QRanges[6], QRanges[7], param);
-	  return t_out;
-	} else {
-	  CUSTOM_ASSERT(false && "Unknown approximation type");
-	  ERROR("Unknown approximation type");
-	  abort();
-	}
-	// TODO additional approx methods implemented here
-
-      } else if (approxTuples.size() == 2) {
-	ERROR("Currently unsupported case");
-	abort();
-      } else {
-	ERROR("Unsupported case");
-	abort();
-      }
-    }
-    else
-      if (NodeConf->isGPUNodeConfiguration()) {
+    if (NodeConf->isGPUNodeConfiguration()) {
 	DEBUG("GPU Configuration for FCLayer\n");
 	// Mapped to GPU - get a GPU node configuration
 	GPUNodeConfiguration *GPUConf = (GPUNodeConfiguration *)NodeConf;
diff --git a/hpvm/test/dnn_benchmarks/hpvm-c/CMakeLists.txt b/hpvm/test/dnn_benchmarks/hpvm-c/CMakeLists.txt
index ab03bdeed6d74d8036171622faad393c52336309..76d6910d2d43d641f5a2dfff1d48b39fe25686a4 100644
--- a/hpvm/test/dnn_benchmarks/hpvm-c/CMakeLists.txt
+++ b/hpvm/test/dnn_benchmarks/hpvm-c/CMakeLists.txt
@@ -43,7 +43,6 @@ foreach(dir ${entries})
   # Generate "tensor"-targeted code
   approxhpvm_py_codegen(
     ${dirname} ${dir}/${dirname}.cpp tensor 
-    --quant-file ${dir}/data/quant_ranges_rt.txt
     --conf-file ${dir}/data/tuner_confs.txt
   )
   # Run tensor binary
diff --git a/hpvm/tools/py-approxhpvm/main.py.in b/hpvm/tools/py-approxhpvm/main.py.in
index 0bb08a93564163c55653cf7a479faec886c1648c..9305b8a25a18541f545a1925947b1abe4465a3f8 100644
--- a/hpvm/tools/py-approxhpvm/main.py.in
+++ b/hpvm/tools/py-approxhpvm/main.py.in
@@ -34,13 +34,12 @@ def compile_hpvm_c(
     codegen_target: str = "tensor",
     include: List[PathLike] = None,
     working_dir: PathLike = None,
-    quant_file: PathLike = None,
     conf_file: PathLike = None,
 ):
     from subprocess import check_output
 
     codegen_functions = {
-        "tensor": lambda i, o: opt_codegen_tensor(i, o, quant_file, conf_file),
+        "tensor": lambda i, o: opt_codegen_tensor(i, o, conf_file),
         "cudnn": opt_codegen_cudnn
     }
     codegen_f = codegen_functions[codegen_target]
@@ -98,7 +97,7 @@ def opt_codegen_cudnn(src_file: PathLike, target_file: PathLike) -> List[str]:
 
 
 def opt_codegen_tensor(
-    src_file: PathLike, target_file: PathLike, quant_file: PathLike, conf_file: PathLike
+    src_file: PathLike, target_file: PathLike, conf_file: PathLike
 ):
     passes = [
         "LLVMBuildDFG", "LLVMInPlaceDFGAnalysis",
@@ -108,7 +107,6 @@ def opt_codegen_tensor(
     flags = [
         "buildDFG", "inplace", "hpvm-fuse",
         "dfg2llvm-wrapperapi",
-        f"quantization-levels-filename={quant_file}",
         f"configuration-inputs-filename={conf_file}",
         "dfg2llvm-cpu", "clearDFG",
     ]
@@ -159,10 +157,6 @@ def parse_args():
     parser.add_argument(
         "-d", "--working-dir", type=Path, help="Directory to generate temp files in"
     )
-    parser.add_argument(
-        "--quant-file", type=Path,
-        help="File to quantization levels of layers; required for 'tensor' target"
-    )
     parser.add_argument(
         "--conf-file", type=Path,
         help="File to approximation configurations; required for 'tensor' target"
@@ -174,10 +168,8 @@ def parse_args():
 
     args = parser.parse_args()
     if args.codegen_target == "tensor":
-        if args.quant_file is None:
-            parser.error('Codegen target "tensor" requires -quant-file argument')
         if args.conf_file is None:
-            parser.error('Codegen target "tensor" requires -conf-file argument')
+            parser.error('Codegen target "tensor" requires --conf-file argument')
     return args