Skip to content
Snippets Groups Projects

Fine-tune scheduling of parallel reductions

Merged rarbore2 requested to merge gcm_nested_parallel_forks2 into main
23 files
+ 226 − 129
Compare changes
  • Side-by-side
  • Inline
Files
23
@@ -9,11 +9,16 @@ use crate::*;
* c) no domination by any other fork that's also dominated by F, where we do count self-domination
* Here too we include the non-fork start node, as key for all controls outside any fork.
*/
pub fn fork_control_map(fork_join_nesting: &HashMap<NodeID, Vec<NodeID>>) -> HashMap<NodeID, HashSet<NodeID>> {
pub fn fork_control_map(
fork_join_nesting: &HashMap<NodeID, Vec<NodeID>>,
) -> HashMap<NodeID, HashSet<NodeID>> {
let mut fork_control_map = HashMap::new();
for (control, forks) in fork_join_nesting {
let fork = forks.first().copied().unwrap_or(NodeID::new(0));
fork_control_map.entry(fork).or_insert_with(HashSet::new).insert(*control);
fork_control_map
.entry(fork)
.or_insert_with(HashSet::new)
.insert(*control);
}
fork_control_map
}
@@ -24,13 +29,19 @@ pub fn fork_control_map(fork_join_nesting: &HashMap<NodeID, Vec<NodeID>>) -> Has
* c) no domination by any other fork that's also dominated by F, where we don't count self-domination
* Note that the fork_tree also includes the non-fork start node, as unique root node.
*/
pub fn fork_tree(function: &Function, fork_join_nesting: &HashMap<NodeID, Vec<NodeID>>) -> HashMap<NodeID, HashSet<NodeID>> {
pub fn fork_tree(
function: &Function,
fork_join_nesting: &HashMap<NodeID, Vec<NodeID>>,
) -> HashMap<NodeID, HashSet<NodeID>> {
let mut fork_tree = HashMap::new();
for (control, forks) in fork_join_nesting {
if function.nodes[control.idx()].is_fork() {
fork_tree.entry(*control).or_insert_with(HashSet::new);
let nesting_fork = forks.get(1).copied().unwrap_or(NodeID::new(0));
fork_tree.entry(nesting_fork).or_insert_with(HashSet::new).insert(*control);
fork_tree
.entry(nesting_fork)
.or_insert_with(HashSet::new)
.insert(*control);
}
}
fork_tree
Loading