mirror of https://github.com/xythrez/RustMP.git
Removed imports to ensure generated code consistency
parent 31fca0b6d7 · commit 570e884156

@@ -1,8 +1,10 @@
-use std::sync::*;
-//use std::sync::RwLock;
-use std::sync::atomic::*;
-use std::thread;
-//use rustmp::rmp_parallel_for;
+// libraries used:
+// std::sync::Arc;
+// std::sync::RwLock;
+// std::sync::atomic::AtomicIsize;
+// std::sync::atomic::AtomicI32;
+// std::sync::atomic::Ordering;
+// std::thread;
 
 /*
  * A basic sequential function that we want to modify.
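
Note on the hunk above: code emitted by RustMP's macros expands inside the caller's module, where no particular `use` lines can be assumed, so the imports are dropped and every path in the generated code is written out in full; the comment block merely records which items the file relies on. A minimal sketch of the idea (the `make_shared!` macro below is hypothetical, not part of RustMP):

    // Hypothetical macro illustrating path hygiene: the expansion spells
    // out full paths, so it compiles in any caller module even if the
    // caller never wrote `use std::sync::Arc;`.
    macro_rules! make_shared {
        ($init:expr) => {
            std::sync::Arc::new(std::sync::atomic::AtomicI32::new($init))
        };
    }

    fn main() {
        // No `use` statements needed at the call site.
        let counter = make_shared!(0);
        counter.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
        println!("{}", counter.load(std::sync::atomic::Ordering::SeqCst));
    }

Procedural macros often go a step further and emit `::std::...` paths so that even a local module named `std` cannot shadow the expansion.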

@@ -32,10 +34,12 @@ fn aug_main() {
 
     //#[rmp_parallel_for(shared(counter) schedule(static, 1))]
     fn _loop(counter: &mut i32) {
         for i in 0..4 {
             println!("Index {}: Hello from loop {}!", counter, i);
             *counter += 1;
-    }} _loop(&mut counter);
+        }
+    }
+    _loop(&mut counter);
 }
 
 /*
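
The commented-out attribute above is the intended user-facing API; `schedule(static, 1)` names OpenMP-style static scheduling with a block size of 1, i.e. iterations are dealt out to the threads round-robin, one at a time. A standalone sketch of that assignment, mirroring the precompute loop in the next hunk (with a `curr_block_size` reset added, which the generated code below omits; the omission is harmless at block size 1, but with a larger block size every block after the first would degrade to size 1):

    // Static scheduling sketch: deal iterations to threads in blocks.
    fn main() {
        let max_threads: usize = 4;
        let block_size = 1;
        let mut iter_arr: Vec<Vec<usize>> = vec![vec![]; max_threads];
        let mut curr_block_size = 0;
        let mut curr_block_thread = 0;
        for i in 0..8 {
            iter_arr[curr_block_thread].push(i);
            curr_block_size += 1;
            if curr_block_size >= block_size {
                curr_block_size = 0; // reset; only matters for block_size > 1
                curr_block_thread = (curr_block_thread + 1) % max_threads;
            }
        }
        // Prints [[0, 4], [1, 5], [2, 6], [3, 7]]
        println!("{:?}", iter_arr);
    }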

@@ -49,70 +53,80 @@ fn rmp_main() {
     let mut counter = 0;
 
     fn _loop(counter: &mut i32) {
         // Startup - Populate environment variables using env::var
         let __rmp_internal_max_threads = 4;
 
         // Startup - Populate macro parameters
         let __rmp_internal_block_size = 1;
 
         // Startup - Initialize required arrays
         let mut __rmp_internal_threads_arr = vec![];
         let mut __rmp_internal_iter_arr = vec![];
         for _ in 0..__rmp_internal_max_threads {
             __rmp_internal_iter_arr.push(vec![]);
         }
         let mut __rmp_internal_curr_block_size = 0;
         let mut __rmp_internal_curr_block_thread = 0;
 
         // Startup - Promote shared mutables into Arc references
         // Idea - Possible optimization based on type? RwLock is expensive.
-        let __rmp_var_counter = Arc::new(AtomicI32::new(*counter));
+        let __rmp_var_counter = std::sync::Arc::new(std::sync::atomic::AtomicI32::new(*counter));
 
         // Execution - Precompute the iterations for each loop
         // The 0..4 here should be parsed from the original tokens
         for __rmp_internal_i in 0..4 {
             __rmp_internal_iter_arr[__rmp_internal_curr_block_thread].push(__rmp_internal_i);
             __rmp_internal_curr_block_size += 1;
             if __rmp_internal_curr_block_size >= __rmp_internal_block_size {
-                __rmp_internal_curr_block_thread = (__rmp_internal_curr_block_thread + 1) % __rmp_internal_max_threads;
+                __rmp_internal_curr_block_thread =
+                    (__rmp_internal_curr_block_thread + 1) % __rmp_internal_max_threads;
             }
         }
 
         // Startup - Extract the thread's own iterator
         let __rmp_internal_iter_self = __rmp_internal_iter_arr.remove(0);
 
         // Execution - Spawn threads with loop contents
         for __rmp_internal_iter in __rmp_internal_iter_arr {
             // Clone used Arcs here
-            let __rmp_var_counter = Arc::clone(&__rmp_var_counter);
+            let __rmp_var_counter = std::sync::Arc::clone(&__rmp_var_counter);
 
             // Spawn threads
-            __rmp_internal_threads_arr.push(thread::spawn(move || {
+            __rmp_internal_threads_arr.push(std::thread::spawn(move || {
                 for i in __rmp_internal_iter {
                     // Having separate load and fetch_add should be a data race,
                     // However, I believe OpenMP also treats it as a data race,
                     // so its fine to have this issue
                     // Need to implement #[rmp_critical] to update it correctly
-                    println!("Index {}: Hello from loop {}!", __rmp_var_counter.load(Ordering::SeqCst), i);
-                    __rmp_var_counter.fetch_add(1, Ordering::SeqCst);
+                    println!(
+                        "Index {}: Hello from loop {}!",
+                        __rmp_var_counter.load(std::sync::atomic::Ordering::SeqCst),
+                        i
+                    );
+                    __rmp_var_counter.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
                 }
             }));
         }
 
         // Execution - Extract the same thread logic for self
         for i in __rmp_internal_iter_self {
-            println!("Index {}: Hello from loop {}!", __rmp_var_counter.load(Ordering::SeqCst), i);
-            __rmp_var_counter.fetch_add(1, Ordering::SeqCst);
+            println!(
+                "Index {}: Hello from loop {}!",
+                __rmp_var_counter.load(std::sync::atomic::Ordering::SeqCst),
+                i
+            );
+            __rmp_var_counter.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
         }
 
         // Cleanup - Wait for threads
         for __rmp_internal_thread in __rmp_internal_threads_arr {
             let _ = __rmp_internal_thread.join();
         }
 
         // Cleanup - Restore variables from Arc references
-        *counter = __rmp_var_counter.load(Ordering::SeqCst);
-    } _loop(&mut counter);
+        *counter = __rmp_var_counter.load(std::sync::atomic::Ordering::SeqCst);
+    }
+    _loop(&mut counter);
 }
 
 /*
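
The expansion's own comments flag the race in the hunk above: the `load` used for printing and the later `fetch_add` are two separate atomic operations, so two threads can print the same index. `fetch_add` already returns the previous value atomically; a minimal sketch of the race-free variant (the `par_main` hunk below uses the same pattern), offered as an alternative to the planned `#[rmp_critical]` section rather than as RustMP's actual fix:

    use std::sync::Arc;
    use std::sync::atomic::{AtomicI32, Ordering};
    use std::thread;

    fn main() {
        let counter = Arc::new(AtomicI32::new(0));
        let mut handles = vec![];
        for i in 0..4 {
            let counter = Arc::clone(&counter);
            handles.push(thread::spawn(move || {
                // One atomic read-modify-write: no two threads can
                // observe the same index.
                let index = counter.fetch_add(1, Ordering::SeqCst);
                println!("Index {}: Hello from loop {}!", index, i);
            }));
        }
        for handle in handles {
            let _ = handle.join();
        }
    }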

@@ -120,17 +134,17 @@ fn rmp_main() {
  * possible.
  */
 fn par_main() {
-    let counter = Arc::new(AtomicIsize::new(0));
+    let counter = std::sync::Arc::new(std::sync::atomic::AtomicIsize::new(0));
     let mut children = vec![];
 
     for i in 1..4 {
-        let counter = Arc::clone(&counter);
-        children.push(thread::spawn(move || {
-            let index = counter.fetch_add(1, Ordering::SeqCst);
+        let counter = std::sync::Arc::clone(&counter);
+        children.push(std::thread::spawn(move || {
+            let index = counter.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
             println!("Index {}: Hello from loop {}!", index, i);
         }));
     }
-    let index = counter.fetch_add(1, Ordering::SeqCst);
+    let index = counter.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
     println!("Index {}: Hello from loop {}!", index, 0);
 
     for child in children {
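
For comparison, and not something this commit changes: since Rust 1.63, scoped threads (`std::thread::scope`) can borrow the atomic directly, which removes the Arc promotion and the clone-per-thread, and joins all spawned threads implicitly when the scope ends. A sketch of `par_main` rewritten that way, under the assumption that the code may target a recent toolchain:

    use std::sync::atomic::{AtomicIsize, Ordering};
    use std::thread;

    fn main() {
        let counter = AtomicIsize::new(0);
        thread::scope(|s| {
            for i in 1..4 {
                let counter = &counter; // borrow, no Arc needed
                s.spawn(move || {
                    let index = counter.fetch_add(1, Ordering::SeqCst);
                    println!("Index {}: Hello from loop {}!", index, i);
                });
            }
            let index = counter.fetch_add(1, Ordering::SeqCst);
            println!("Index {}: Hello from loop {}!", index, 0);
        }); // all spawned threads are joined here
    }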

@@ -138,7 +152,6 @@ fn par_main() {
     }
 }
 
-
 fn main() {
     println!("Running Sequential Version:");
     seq_main();