author | Brett Weiland <brett_weiland@bpcspace.com> | 2023-01-23 14:43:30 -0600 |
---|---|---|
committer | Brett Weiland <brett_weiland@bpcspace.com> | 2023-01-23 14:43:30 -0600 |
commit | 3b97e627caa6883391bd1e9f3c31172e10bf7404 (patch) | |
tree | 534962ac997ec72434f964129d4ba17ad792ce95 /mthread.cpp | |
parent | 61bd7df99d32b8c489383568afc53117ab1fa81f (diff) | |
finished functions to achieve polymorphism
Diffstat (limited to 'mthread.cpp')
-rw-r--r-- | mthread.cpp | 292 |
1 file changed, 135 insertions, 157 deletions
diff --git a/mthread.cpp b/mthread.cpp
index 269155d..4af25ff 100644
--- a/mthread.cpp
+++ b/mthread.cpp
@@ -30,7 +30,7 @@ mthread::mthread(
 
 void mthread::dispatch() {
   if((my_thread) && (my_thread->joinable())) delete my_thread;
-  my_thread = new thread([this] {render();});
+  my_thread = new thread([this] {run();});
 }
 
 
@@ -46,185 +46,163 @@ void mthread::join() {
   if((my_thread) && (my_thread->joinable())) my_thread->join();
 }
 
-//TODO make final
-//looks for work
-void mthread::find_work() {
-}
-
-//makes sure no one is asking for work from us
-void mthread::check_work_request() {
-}
-
-//renders area
-void mthread::render_area() { //TODO rename
-}
-
-//alternates states of finding work work and rendering
-void mthread::run() {
-  for(;;) {
-  }
-}
-
-
-//TODO move syncronization to another function for extensibility
-void mthread::render() {
-  uint32_t image_width = image.width();
-  unsigned int iter;
+bool mthread::find_work() {
   unsigned int worker, workers_finished;
   uint32_t loads[worker_cnt];
-  double pixel_value;
-  complex<double> c, a;
   struct mthread_status *peer_status;
   struct mthread_divinfo divinfo;
-
+  workers_finished = 0;
   unique_lock<mutex> ack;
-  unique_lock<mutex> syn_ack;
-
-  status.status_lock.lock();
-  status.searching = false;
-  status.share_finished = false;
-  status.div_syn = false;
-  status.div_error = false;
+  status.status_lock.lock();
+  status.searching = true;
+  status.share_finished = true;
   status.status_lock.unlock();
+  //TODO do we really need this whole for loop?
+  for(worker = 0; worker < worker_cnt; worker++) {
+    //lock other worker so we can request from them
+    peer_status = &workers[worker]->status;
+    peer_status->status_lock.lock();
-
-  y_min = 0;
-  y_max = image.height();
+    //if they're done, we remember that to see if we exit
+    if((peer_status->share_finished) && (worker != id)) {
+      workers_finished++;
+    }
+    //if they're us, currently looking for work,
+    //or don't have enough work for us to complete, then skip
+
+    if((worker == id) ||
+       (peer_status->searching) || (peer_status->div_syn) || (peer_status->row_load < min_lines)) {
+      loads[worker] = 0;
+      peer_status->status_lock.unlock();
+      continue;
+    }
+    //finally, if they're valid, write them down
+    loads[worker] = peer_status->row_load;
+    peer_status->status_lock.unlock();
+  }
+  //exit if all workers are finished
+  if(workers_finished >= worker_cnt - 1) {
+    return false;
+  }
+  //then we look over and pick our canidates
+  for(;;) {
+    //find the worker who has the biggest workload
+    worker = distance(loads, max_element(loads, &loads[worker_cnt]));
+    if(!loads[worker]) break; //we have found a worker; distance is 0
+    peer_status = &workers[worker]->status;
+    peer_status->status_lock.lock();
+    //check to see if canidate is valid.
+    //TODO do we really need to check the first time?
+    if((peer_status->searching) || (peer_status->div_syn) || (peer_status->row_load < min_lines)) {
+      loads[worker] = 0;
+      peer_status->status_lock.unlock();
+      continue;
+    }
+    ack = unique_lock<mutex>(peer_status->ack_lk);
+    peer_status->div_syn = true;
+    peer_status->status_lock.unlock();
+    peer_status->msg_notify.wait(ack);
+    ack.unlock();
+    if(peer_status->div_error) {
+      loads[worker] = 0;
+      peer_status->status_lock.lock();
+      peer_status->div_error = false;
+      peer_status->div_syn = false;
+      peer_status->status_lock.unlock();
+      continue;
+    }
+    divinfo = workers[worker]->divide();
+    peer_status->syn_ack_lk.unlock();
+    peer_status->msg_notify.notify_all();
+    y_min = divinfo.y_min;
+    y_max = divinfo.y_max;
+    x_min = divinfo.x_min;
+    x_max = divinfo.x_max;
+    status.status_lock.lock();
+    status.searching = false;
+    status.status_lock.unlock();
+    break;
+  }
+  return true;
+}
+//makes sure no one is asking for work from us
+void mthread::check_work_request() {
+  unique_lock<mutex> syn_ack;
-
-  for(;;) {
-    //thread is actively rendering
-    for(on_y = y_min; on_y < y_max; on_y++) {
-      progress++;
-      status.status_lock.lock();
+  status.status_lock.lock();
+  status.row_load = y_max - on_y;
+  //check if anyone's asking us to divide
+  if(status.div_syn) {
+    status.div_error = status.row_load <= min_lines;
+    if(status.div_error) {
+      status.ack_lk.unlock();
+      status.msg_notify.notify_all();
+    }
+    else {
+      syn_ack = unique_lock<mutex>(status.syn_ack_lk);
+      status.ack_lk.unlock();
+      status.msg_notify.notify_all();
+      status.msg_notify.wait(syn_ack);
       status.row_load = y_max - on_y;
-      //check if anyone's asking us to divide
-      if(status.div_syn) {
-
-        status.div_error = status.row_load <= min_lines;
-
-        if(status.div_error) {
-          status.ack_lk.unlock();
-          status.msg_notify.notify_all();
-        }
-        else {
-          syn_ack = unique_lock<mutex>(status.syn_ack_lk);
-          status.ack_lk.unlock();
-          status.msg_notify.notify_all();
-          status.msg_notify.wait(syn_ack);
-          status.row_load = y_max - on_y;
-          syn_ack.unlock();
-          //new x/y min/max is ajusted by other thread, we can continue as normal.
-        }
-      }
-      status.status_lock.unlock();
-
-      for(on_x = x_min; on_x < x_max; on_x++) {
-        c = (step * complex<double>(on_x,on_y)) + c_min;
-        a = 0;
-        for(iter = 0; iter < max_iter; iter++) {
-          if(abs(a) >= inf_cutoff) break;
-          a = a*a + c;
-        }
-        if(iter >= max_iter) {
-          iter = 0;
-          vmap[(on_y * image_width) + on_x] = 0;
-        }
-        else {
-          pixel_value = (iter + 1) - (log((log(pow(abs(a), 2.0)) / 2.0) / log(2.0)));
-          vmap[(on_y * image_width) + on_x] = pixel_value;
-          histogram[(int)pixel_value]++;
-        }
-      }
+      syn_ack.unlock();
+      //new x/y min/max is ajusted by other thread, we can continue as normal.
     }
+  }
+  status.status_lock.unlock();
+}
+//renders area
+void mthread::render_area() {
+  uint32_t image_width = image.width();
+  unsigned int iter;
+  complex<double> c, a; //TODO comment
+  double pixel_value;
-    //thread is now searching for work
-
-    /** 2022 comment:
-     * this state should have been moved to a seperate function to allow rendering methods to differ
-     * from inherited mthreads without needing to reimpliment searching **/
-    status.status_lock.lock();
-    status.searching = true;
-    status.share_finished = true;
-    status.status_lock.unlock();
-
-    //first we look over all workers to see which are canidates to ask for work
-    while(status.searching) {
-      workers_finished = 0;
-      //TODO do we really need this whole for loop?
-      for(worker = 0; worker < worker_cnt; worker++) {
-        //lock other worker so we can request from them
-        peer_status = &workers[worker]->status;
-        peer_status->status_lock.lock();
-
-        //if they're done, we remember that to see if we exit
-        if((peer_status->share_finished) && (worker != id)) {
-          workers_finished++;
-        }
-        //if they're us, currently looking for work,
-        //or don't have enough work for us to complete, then skip
-
-        if((worker == id) ||
-           (peer_status->searching) || (peer_status->div_syn) || (peer_status->row_load < min_lines)) {
-          loads[worker] = 0;
-          peer_status->status_lock.unlock();
-          continue;
-        }
-        //finally, if they're valid, write them down
-        loads[worker] = peer_status->row_load;
-        peer_status->status_lock.unlock();
+  for(on_y = y_min; on_y < y_max; on_y++) {
+    progress++;
+    check_work_request();
+    for(on_x = x_min; on_x < x_max; on_x++) {
+      c = (step * complex<double>(on_x,on_y)) + c_min;
+      a = 0;
+      for(iter = 0; iter < max_iter; iter++) {
+        if(abs(a) >= inf_cutoff) break;
+        a = a*a + c;
       }
-      //exit if all workers are finished
-      if(workers_finished >= worker_cnt - 1) {
-        return;
+      if(iter >= max_iter) {
+        iter = 0;
+        vmap[(on_y * image_width) + on_x] = 0;
       }
-      //then we look over and pick our canidates
-      for(;;) {
-        //find the worker who has the biggest workload
-        worker = distance(loads, max_element(loads, &loads[worker_cnt]));
-        if(!loads[worker]) break; //we have found a worker; distance is 0
-        peer_status = &workers[worker]->status;
-        peer_status->status_lock.lock();
-        //check to see if canidate is valid.
-        //TODO do we really need to check the first time?
-        if((peer_status->searching) || (peer_status->div_syn) || (peer_status->row_load < min_lines)) {
-          loads[worker] = 0;
-          peer_status->status_lock.unlock();
-          continue;
-        }
-        ack = unique_lock<mutex>(peer_status->ack_lk);
-        peer_status->div_syn = true;
-        peer_status->status_lock.unlock();
-        peer_status->msg_notify.wait(ack);
-        ack.unlock();
-        if(peer_status->div_error) {
-          loads[worker] = 0;
-          peer_status->status_lock.lock();
-          peer_status->div_error = false;
-          peer_status->div_syn = false;
-          peer_status->status_lock.unlock();
-          continue;
-        }
-
-        divinfo = workers[worker]->divide();
-        peer_status->syn_ack_lk.unlock();
-        peer_status->msg_notify.notify_all();
-        y_min = divinfo.y_min;
-        y_max = divinfo.y_max;
-        x_min = divinfo.x_min;
-        x_max = divinfo.x_max;
-        status.status_lock.lock();
-        status.searching = false;
-        status.status_lock.unlock();
-        break;
+      else {
+        pixel_value = (iter + 1) - (log((log(pow(abs(a), 2.0)) / 2.0) / log(2.0)));
+        vmap[(on_y * image_width) + on_x] = pixel_value;
+        histogram[(int)pixel_value]++;
       }
     }
   }
 }
+//alternates states of finding work work and rendering
+void mthread::run() {
+  status.status_lock.lock(); //TODO move to initilizer
+  status.searching = false;
+  status.share_finished = false;
+  status.div_syn = false;
+  status.div_error = false;
+  status.status_lock.unlock();
+
+  do {
+    render_area();
+  } while (find_work());
+
+}
+
+
+//TODO move syncronization to another function for extensibility
+
 struct mthread_divinfo mthread::divide() {
   struct mthread_divinfo ret;
   ret.x_min = x_min;
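The refactor is what lets dispatch() launch run() instead of render(): run() alternates render_area() with find_work(), so the work-sharing handshake lives in one place and a derived worker only has to supply its own rendering step. A minimal sketch of that extension point, assuming render_area() is declared virtual and the coordinate/progress members are accessible to derived classes in mthread.h (neither is shown in this diff); the julia_thread name is hypothetical:

```cpp
#include "mthread.h"

// Hypothetical derived worker: inherits run(), find_work() and
// check_work_request() from mthread and only swaps out the per-pixel work.
class julia_thread : public mthread {
public:
  using mthread::mthread;        // reuse the base-class constructor

protected:
  // Assumes mthread::render_area() is (or will be made) virtual.
  void render_area() override {
    for(on_y = y_min; on_y < y_max; on_y++) {
      progress++;
      check_work_request();      // keep answering division requests from peers
      for(on_x = x_min; on_x < x_max; on_x++) {
        // ...compute a different fractal here and fill vmap/histogram...
      }
    }
  }
};
```

Because dispatch() now starts the thread on run(), such an override would be reached through ordinary virtual dispatch, with no changes to the work-stealing code.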