From 9e7e4c8ab3cfa2a4ddb1adc072a6bd314d7e57a2 Mon Sep 17 00:00:00 2001
From: Praneet Rathi <prrathi10@gmail.com>
Date: Sat, 18 Jan 2025 12:19:31 -0600
Subject: [PATCH] gpu: propagate write! errors and remove unused imports/bindings

---
 hercules_cg/src/gpu.rs | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/hercules_cg/src/gpu.rs b/hercules_cg/src/gpu.rs
index 23e58c14..731e57e3 100644
--- a/hercules_cg/src/gpu.rs
+++ b/hercules_cg/src/gpu.rs
@@ -1,7 +1,7 @@
 extern crate bitvec;
 extern crate hercules_ir;
 
-use std::collections::{BTreeMap, HashMap, HashSet, VecDeque};
+use std::collections::{BTreeMap, HashMap, HashSet};
 use std::fmt::{Error, Write};
 
 use self::hercules_ir::*;
@@ -135,7 +135,7 @@ pub fn gpu_codegen<W: Write>(
     // Obtain the Return node and if it's a collection, use the collection objects
     // analysis to determine the origin. Also save the return node id for later
     // conversion of primitive Return into Parameter.
-    let (return_node_id, data_node_id) = {
+    let (_, data_node_id) = {
         let pos = function
             .nodes
             .iter()
@@ -563,11 +563,11 @@ int main() {{
                     write!(w, "\t{} p{} = 1;\n", param_type, idx)?;
                 } else {
                     let param_size = self.get_size(*ty, None, None);
-                    write!(w, "\t{} p{};\n", param_type, idx);
+                    write!(w, "\t{} p{};\n", param_type, idx)?;
                     write!(w, "\tif (cudaMalloc((void**)&p{}, {}) != cudaSuccess) {{\n", idx, param_size)?;
                     write!(w, "\t\tprintf(\"Error allocating memory for parameter %d\\n\", {});\n", idx)?;
-                    write!(w, "\t\treturn -1;\n");
-                    write!(w, "\t}}\n");
+                    write!(w, "\t\treturn -1;\n")?;
+                    write!(w, "\t}}\n")?;
                 }
                 write!(pass_args, "p{}", idx)?;
             }
@@ -579,20 +579,20 @@ int main() {{
                 write!(w, "\tif (cudaMalloc((void**)&ret, sizeof({})) != cudaSuccess) {{\n", ret_type_no_pnt)?;
                 write!(w, "\t\tprintf(\"Error allocating memory for return value\\n\");\n")?;
                 write!(w, "\t\treturn -1;\n")?;
-                write!(w, "\t}}\n");
+                write!(w, "\t}}\n")?;
                 write!(pass_args, ", ret")?;
             }
-            write!(w, "\t{}<<<{}, {}, {}>>>({});\n", self.function.name, num_blocks, num_threads, dynamic_shared_offset, pass_args);
+            write!(w, "\t{}<<<{}, {}, {}>>>({});\n", self.function.name, num_blocks, num_threads, dynamic_shared_offset, pass_args)?;
             write!(w, "\tbool skip = false;\n")?;
             write!(w, "\tcudaError_t err = cudaGetLastError();\n")?;
             write!(w, "\tif (err != cudaSuccess) {{\n")?;
             write!(w, "\t\tprintf(\"Error launching kernel: %s\\n\", cudaGetErrorString(err));\n")?;
             write!(w, "\t\tskip = true;\n")?;
-            write!(w, "\t}}\n");
+            write!(w, "\t}}\n")?;
             write!(w, "\tif (cudaDeviceSynchronize() != cudaSuccess && !skip) {{\n")?;
             write!(w, "\t\tprintf(\"Error synchronizing device\\n\");\n")?;
             write!(w, "\t\tskip = true;\n")?;
-            write!(w, "\t}}\n");
+            write!(w, "\t}}\n")?;
             for (idx, ty) in self.function.param_types.iter().enumerate() {
                 if !self.types[ty.idx()].is_primitive() {
                     write!(w, "\tcudaFree(p{});\n", idx)?;
@@ -1122,7 +1122,7 @@ extern \"C\" {} {}(", if ret_primitive { ret_type.clone() } else { "void".to_str
             // want to "allocate" and initialize it once.
             Node::Constant { id: cons_id } => {
                 let is_primitive = self.types[self.typing[id.idx()].idx()].is_primitive();
-                if (!is_primitive) {
+                if !is_primitive {
                     let cg_tile = {
                         let KernelState::OutBlock = state else {
                             panic!("Expected constant to be in start basic block
@@ -1142,7 +1142,7 @@ extern \"C\" {} {}(", if ret_primitive { ret_type.clone() } else { "void".to_str
                     w,
                     *num_tabs,
                 )?;
-                if (!is_primitive) {
+                if !is_primitive {
                     write!(w, "{}}}\n", tabs)?;
                     *num_tabs -= 1;
                 }
@@ -1679,9 +1679,9 @@ extern \"C\" {} {}(", if ret_primitive { ret_type.clone() } else { "void".to_str
                             dynamic_shared_offset,
                             w,
                             num_tabs,
-                        );
+                        )?;
                     } else if !field_constant.is_array() {
-                        self.codegen_constant(format!("{}+{}", name, offset), constant_fields[i], false, extra_dim_collects, dynamic_shared_offset, w, num_tabs);
+                        self.codegen_constant(format!("{}+{}", name, offset), constant_fields[i], false, extra_dim_collects, dynamic_shared_offset, w, num_tabs)?;
                     }
                 }
             }
@@ -1710,9 +1710,9 @@ extern \"C\" {} {}(", if ret_primitive { ret_type.clone() } else { "void".to_str
                         dynamic_shared_offset,
                         w,
                         num_tabs,
-                    );
+                    )?;
                 } else if !variant_constant.is_array() {
-                    self.codegen_constant(name, *field, false, extra_dim_collects, dynamic_shared_offset, w, num_tabs);
+                    self.codegen_constant(name, *field, false, extra_dim_collects, dynamic_shared_offset, w, num_tabs)?;
                 };
             }
             Constant::Array(type_id) => {
-- 
GitLab