diff --git a/hercules_cg/src/gpu.rs b/hercules_cg/src/gpu.rs
index c07ed4e5cd10b630393a25f8cb3e61472d6070d9..d802a274e95b151db0efefe1ec53ba1fdb43a185 100644
--- a/hercules_cg/src/gpu.rs
+++ b/hercules_cg/src/gpu.rs
@@ -171,6 +171,62 @@ pub fn gpu_codegen<W: Write>(
         threads_per_warp: 32,
     };
 
+    // Check whether we can avoid emitting synchronization for joins.
+    let avoid_join_sync = (|| {
+        // Check for a simple block/thread fork structure: the root fork tree
+        // contains a single block-level fork, which contains a single
+        // thread-level fork with no further nesting.
+        let Some(root_forks) = fork_tree.get(&NodeID::new(0)) else {
+            return false;
+        };
+        if root_forks.len() != 1 {
+            return false;
+        }
+        let block = *root_forks.into_iter().next().unwrap();
+        let Some(block_forks) = fork_tree.get(&block) else {
+            return false;
+        };
+        if block_forks.len() != 1 {
+            return false;
+        }
+        let thread = *block_forks.into_iter().next().unwrap();
+        if let Some(thread_forks) = fork_tree.get(&thread) && !thread_forks.is_empty() {
+            return false;
+        }
+
+        // Check that the reduction results of the thread and block forks
+        // aren't needed anywhere else inside this kernel.
+        let thread_join = fork_join_map[&thread];
+        let block_join = fork_join_map[&block];
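+        // Collect the reduce nodes attached to the thread and block joins.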
+        let thread_reduces: Vec<_> = def_use_map
+            .get_users(thread_join)
+            .as_ref()
+            .into_iter()
+            .filter(|id| function.nodes[id.idx()].is_reduce())
+            .collect();
+        let block_reduces: Vec<_> = def_use_map
+            .get_users(block_join)
+            .as_ref()
+            .into_iter()
+            .filter(|id| function.nodes[id.idx()].is_reduce())
+            .collect();
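+        // Every reduce hanging off the thread-level join must carry the
+        // ParallelReduce schedule and have exactly one user, which must be one
+        // of the reduces on the block-level join.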
+        for id in thread_reduces {
+            if !function.schedules[id.idx()].contains(&Schedule::ParallelReduce) {
+                return false;
+            }
+            let users = def_use_map.get_users(*id);
+            if users.len() > 1 {
+                return false;
+            }
+            let user = users.into_iter().next().unwrap();
+            if !block_reduces.contains(&user) {
+                return false;
+            }
+        }
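+        // Likewise, every reduce on the block-level join must be a
+        // ParallelReduce whose single user is the function's return, so the
+        // reduction results are never read elsewhere in the kernel.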
+        for id in block_reduces {
+            if !function.schedules[id.idx()].contains(&Schedule::ParallelReduce) {
+                return false;
+            }
+            let users = def_use_map.get_users(*id);
+            if users.len() > 1 {
+                return false;
+            }
+            let user = users.into_iter().next().unwrap();
+            if !function.nodes[user.idx()].is_return() {
+                return false;
+            }
+        }
+        true
+    })();
+
     let ctx = GPUContext {
         module_name,
         function,
@@ -192,6 +248,7 @@ pub fn gpu_codegen<W: Write>(
         control_data_phi_map,
         return_parameters,
         kernel_params,
+        avoid_join_sync,
         generated_sync: RefCell::new(false),
     };
     ctx.codegen_function(w)
@@ -223,6 +280,7 @@ struct GPUContext<'a> {
     control_data_phi_map: HashMap<NodeID, Vec<(NodeID, NodeID)>>,
     return_parameters: Vec<Option<usize>>,
     kernel_params: &'a GPUKernelParams,
+    avoid_join_sync: bool,
     generated_sync: RefCell<bool>,
 }
 
@@ -1815,10 +1873,12 @@ namespace cg = cooperative_groups;
                         write!(w_term, "\t}}\n")?;
                         tabs += 1;
                     }
-                    let fork = self.join_fork_map.get(&id).unwrap();
-                    let cg_tile_available = self.get_cg_tile(*fork, CGType::Available);
-                    write!(w_term, "\t{}.sync();\n", cg_tile_available)?;
-                    *self.generated_sync.borrow_mut() = true;
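+                    // Skip the sync entirely when the analysis above proved
+                    // the reduction results are only consumed by the return.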
+                    if !self.avoid_join_sync {
+                        let fork = self.join_fork_map.get(&id).unwrap();
+                        let cg_tile_available = self.get_cg_tile(*fork, CGType::Available);
+                        write!(w_term, "\t{}.sync();\n", cg_tile_available)?;
+                        *self.generated_sync.borrow_mut() = true;
+                    }
                 }
                 // If the Fork was parallelized, each thread or UsedPerId tile of
                 // threads only runs one ThreadID, so we can jump straight to the