path: root/mthread.cpp
author    Brett Weiland <brett_weiland@bpcspace.com>  2023-01-23 19:11:54 -0600
committer Brett Weiland <brett_weiland@bpcspace.com>  2023-01-23 19:11:54 -0600
commit    e63ea4982577f13523e5e97d4de6fc779fbc980d (patch)
tree      addfa77515c14bd7e1a55afb92c099ad7536648c /mthread.cpp
parent    cbe23199c794d62fe84c4522d2209641f7107980 (diff)
parent    713d467385959ac4299e685fffcad7ffff788758 (diff)
fixed readme conflict
Diffstat (limited to 'mthread.cpp')
-rw-r--r--  mthread.cpp  278
1 file changed, 155 insertions, 123 deletions
diff --git a/mthread.cpp b/mthread.cpp
index 796055e..7d1b31f 100644
--- a/mthread.cpp
+++ b/mthread.cpp
@@ -26,11 +26,12 @@ mthread::mthread(
histogram = g_histogram;
step = (c_max - c_min) / complex<double>(image.height(), image.width());
my_thread = NULL;
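+  //hold the status lock until run() initializes the status flags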
+ status.status_lock.lock();
}
void mthread::dispatch() {
if((my_thread) && (my_thread->joinable())) delete my_thread;
- my_thread = new thread([this] {render();});
+ my_thread = new thread([this] {run();});
}
@@ -46,151 +47,182 @@ void mthread::join() {
if((my_thread) && (my_thread->joinable())) my_thread->join();
}
-
-void mthread::render() {
- uint32_t image_width = image.width();
- unsigned int iter;
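+//searches the other workers for one with rows to spare and asks it to divide
+//its workload with us; returns false once every other worker reports share_finished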
+bool mthread::find_work() {
unsigned int worker, workers_finished;
uint32_t loads[worker_cnt];
- double pixel_value;
- complex<double> c, a;
struct mthread_status *peer_status;
struct mthread_divinfo divinfo;
-
+ workers_finished = 0;
unique_lock<mutex> ack;
- unique_lock<mutex> syn_ack;
-
- status.status_lock.lock();
- status.searching = false;
- status.share_finished = false;
- status.div_syn = false;
- status.div_error = false;
+ status.status_lock.lock();
+ status.searching = true;
+ status.share_finished = true;
status.status_lock.unlock();
+ for(worker = 0; worker < worker_cnt; worker++) {
+ //lock other worker so we can request from them
+ peer_status = &workers[worker]->status;
+ peer_status->status_lock.lock();
- y_min = 0;
- y_max = image.height();
-
-
+ //if they're done, we remember that to see if we exit
+ if((peer_status->share_finished) && (worker != id)) {
+ workers_finished++;
+ }
+
+    /** This check appears twice in the code to prevent race conditions.
+      * Before adding a potential candidate worker to our list to be sorted,
+      * we need to make sure it isn't currently being divided (true if div_syn is set).
+      * Since we'll have to repeat the same check later,
+      * when we actually request to split its workload,
+      * we might as well narrow the list down and check everything else now.
+      * **/
+ if((worker == id) ||
+ (peer_status->searching) || (peer_status->div_syn) ||
+ (peer_status->row_load < min_lines)) {
+ loads[worker] = 0;
+ peer_status->status_lock.unlock();
+ continue;
+ }
+ //finally, if they're valid, write them down
+ loads[worker] = peer_status->row_load;
+ peer_status->status_lock.unlock();
+ }
+ //exit if all workers are finished
+ if(workers_finished >= worker_cnt - 1) {
+ return false;
+ }
+  //then we look over the list and pick our candidates
for(;;) {
- //thread is actively rendering
- for(on_y = y_min; on_y < y_max; on_y++) {
- progress++;
- status.status_lock.lock();
- status.row_load = y_max - on_y;
- if(status.div_syn) {
-
- status.div_error = status.row_load <= min_lines;
-
- if(status.div_error) {
- status.ack_lk.unlock();
- status.msg_notify.notify_all();
- }
- else {
- syn_ack = unique_lock<mutex>(status.syn_ack_lk);
- status.ack_lk.unlock();
- status.msg_notify.notify_all();
- status.msg_notify.wait(syn_ack);
- status.row_load = y_max - on_y;
- syn_ack.unlock();
- //new x/y min/max is ajusted by other thread, we can continue as normal.
- }
- }
- status.status_lock.unlock();
-
- for(on_x = x_min; on_x < x_max; on_x++) {
- c = (step * complex<double>(on_x,on_y)) + c_min;
- a = 0;
- for(iter = 0; iter < max_iter; iter++) {
- if(abs(a) >= inf_cutoff) break;
- a = a*a + c;
- }
- if(iter >= max_iter) {
- iter = 0;
- vmap[(on_y * image_width) + on_x] = 0;
- }
- else {
- pixel_value = (iter + 1) - (log((log(pow(abs(a), 2.0)) / 2.0) / log(2.0)));
- vmap[(on_y * image_width) + on_x] = pixel_value;
- histogram[(int)pixel_value]++;
- }
- }
+ //find the worker who has the biggest workload
+ worker = distance(loads, max_element(loads, &loads[worker_cnt]));
+    if(!loads[worker]) return true; //no candidate has any work left to share
+ peer_status = &workers[worker]->status;
+ peer_status->status_lock.lock();
+
+      //re-check that the candidate is still valid (it may have changed since it was added and sorted).
+      //If you're wondering why we're doing this bit again, see the comment before the first check.
+ if((peer_status->searching) || (peer_status->div_syn) ||
+ (peer_status->row_load < min_lines)) {
+ loads[worker] = 0;
+ peer_status->status_lock.unlock();
+ continue;
+ }
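+      /** handshake: set div_syn and sleep on ack_lk until the peer answers;
+        * it either flags div_error or parks itself on syn_ack_lk, letting us
+        * call divide() on it and then notify it of its new bounds. **/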
+      //ack: ask peer to divide
+ ack = unique_lock<mutex>(peer_status->ack_lk);
+ peer_status->div_syn = true;
+ peer_status->status_lock.unlock();
+      //when they reset it, they've acknowledged and have set the appropriate messages
+ peer_status->msg_notify.wait(ack);
+ ack.unlock();
+ //area is too small to divide reliably
+ if(peer_status->div_error) {
+ loads[worker] = 0;
+ peer_status->status_lock.lock();
+ peer_status->div_error = false;
+ peer_status->div_syn = false;
+ peer_status->status_lock.unlock();
+ continue;
}
+ //otherwise, we modify their struct for them to split their workload
+ divinfo = workers[worker]->divide();
+ peer_status->syn_ack_lk.unlock();
- //thread is now searching for work
-
- /** 2022 comment:
- * this state should have been moved to a seperate function to allow rendering methods to differ
- * from inherited mthreads without needing to reimpliment searching **/
- status.status_lock.lock();
- status.searching = true;
- status.share_finished = true;
+ //tell them we're done modifying
+ peer_status->msg_notify.notify_all();
+
+ //set our own stuff
+ y_min = divinfo.y_min;
+ y_max = divinfo.y_max;
+ x_min = divinfo.x_min;
+ x_max = divinfo.x_max;
+
+ //update our status
+ status.status_lock.lock();
+ status.searching = false;
status.status_lock.unlock();
+ return true;
+ }
+ return true;
+}
+
+//checks whether another worker is asking us to split our workload, and handles the request if so
+void mthread::check_work_request() {
+ unique_lock<mutex> syn_ack;
+
+ status.status_lock.lock();
+ status.row_load = y_max - on_y;
+ //check if anyone's asking us to divide
+ if(status.div_syn) {
+ status.div_error = status.row_load <= min_lines;
+ if(status.div_error) {
+ status.ack_lk.unlock();
+ status.msg_notify.notify_all();
+ }
+ else {
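+      //grab syn_ack_lk, wake the requester, then sleep until it has split our
+      //bounds via divide() and notified us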
+ syn_ack = unique_lock<mutex>(status.syn_ack_lk);
+ status.ack_lk.unlock();
+ status.msg_notify.notify_all();
+ status.msg_notify.wait(syn_ack);
+ status.row_load = y_max - on_y;
+ syn_ack.unlock();
+      //new x/y min/max are adjusted by the other thread; we can continue as normal.
+ }
+ }
+ status.status_lock.unlock();
+}
- while(status.searching) {
- workers_finished = 0;
- for(worker = 0; worker < worker_cnt; worker++) {
- peer_status = &workers[worker]->status;
- peer_status->status_lock.lock();
-
- if((peer_status->share_finished) && (worker != id)) {
- workers_finished++;
- }
- if((worker == id) ||
- (peer_status->searching) || (peer_status->div_syn) || (peer_status->row_load < min_lines)) {
- loads[worker] = 0;
- peer_status->status_lock.unlock();
- continue;
- }
- loads[worker] = peer_status->row_load;
- peer_status->status_lock.unlock();
+//renders area
+void mthread::render_area() {
+ uint32_t image_width = image.width();
+ unsigned int iter;
+ complex<double> c, z;
+ double pixel_value;
+
+  /** each pixel is drawn based on whether (and after how many iterations) z
+    * escapes to infinity while computing z = z^2 + c. **/
+
+ for(on_y = y_min; on_y < y_max; on_y++) {
+ progress++;
+ check_work_request();
+ for(on_x = x_min; on_x < x_max; on_x++) {
+ c = (step * complex<double>(on_x,on_y)) + c_min;
+ z = 0;
+ for(iter = 0; iter < max_iter; iter++) {
+ if(abs(z) >= inf_cutoff) break;
+ z = z*z + c;
}
- if(workers_finished >= worker_cnt - 1) {
- return;
+ if(iter >= max_iter) {
+ iter = 0;
+ vmap[(on_y * image_width) + on_x] = 0;
}
- for(;;) {
- worker = distance(loads, max_element(loads, &loads[worker_cnt]));
- if(!loads[worker]) break;
- peer_status = &workers[worker]->status;
- peer_status->status_lock.lock();
- if((peer_status->searching) || (peer_status->div_syn) || (peer_status->row_load < min_lines)) {
- loads[worker] = 0;
- peer_status->status_lock.unlock();
- continue;
- }
- ack = unique_lock<mutex>(peer_status->ack_lk);
- peer_status->div_syn = true;
- peer_status->status_lock.unlock();
- peer_status->msg_notify.wait(ack);
- ack.unlock();
- if(peer_status->div_error) {
- loads[worker] = 0;
- peer_status->status_lock.lock();
- peer_status->div_error = false;
- peer_status->div_syn = false;
- peer_status->status_lock.unlock();
- continue;
- }
-
- divinfo = workers[worker]->divide();
- peer_status->syn_ack_lk.unlock();
- peer_status->msg_notify.notify_all();
- y_min = divinfo.y_min;
- y_max = divinfo.y_max;
- x_min = divinfo.x_min;
- x_max = divinfo.x_max;
- status.status_lock.lock();
- status.searching = false;
- status.status_lock.unlock();
- break;
+ else {
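+        //smooth coloring: nudge the escape count by how far |z| overshot the
+        //cutoff so the gradient doesn't band at integer iteration steps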
+ pixel_value = (iter + 1) - (log((log(pow(abs(z), 2.0)) / 2.0) / log(2.0)));
+ vmap[(on_y * image_width) + on_x] = pixel_value;
+ histogram[(int)pixel_value]++;
}
}
}
}
+//alternates between finding work and rendering
+void mthread::run() {
+  //status_lock was locked in the constructor; set the initial flags, then release it
+ status.searching = false;
+ status.share_finished = false;
+ status.div_syn = false;
+ status.div_error = false;
+ status.status_lock.unlock();
+
+ do {
+ render_area();
+ } while (find_work());
+
+}
+
+
struct mthread_divinfo mthread::divide() {
struct mthread_divinfo ret;
ret.x_min = x_min;