From 3b97e627caa6883391bd1e9f3c31172e10bf7404 Mon Sep 17 00:00:00 2001
From: Brett Weiland
Date: Mon, 23 Jan 2023 14:43:30 -0600
Subject: [PATCH] finished functions to achieve polymorphism

---
 docs/p4.pdf |  Bin 5197678 -> 5197666 bytes
 mthread.cpp | 298 ++++++++++++++++++++++++----------------------
 mthread.hpp |   4 +-
 mthread_old | 269 +++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 410 insertions(+), 161 deletions(-)
 create mode 100644 mthread_old

diff --git a/docs/p4.pdf b/docs/p4.pdf
index 86c225c76818017d53ce92103876c5b1d02f71a7..4b0bff808184c1b706b8737b0061b7462733dcdd 100644
GIT binary patch
(two base85-encoded binary deltas omitted)

diff --git a/mthread.cpp b/mthread.cpp
index 269155d..4af25ff 100644
--- a/mthread.cpp
+++ b/mthread.cpp
@@ -30,7 +30,7 @@ mthread::mthread(
 
 void mthread::dispatch() {
 	if((my_thread) && (my_thread->joinable())) delete my_thread;
-	my_thread = new thread([this] {render();});
+	my_thread = new thread([this] {run();});
 }
 
 
@@ -46,185 +46,163 @@ void mthread::join() {
 	if((my_thread) && (my_thread->joinable())) my_thread->join();
 }
 
-//TODO make final
-//looks for work
-void mthread::find_work() {
+bool mthread::find_work() {
+	unsigned int worker, workers_finished;
+	uint32_t loads[worker_cnt];
+	struct mthread_status *peer_status;
+	struct mthread_divinfo divinfo;
+	workers_finished = 0;
+	unique_lock<mutex> ack;
+
+	status.status_lock.lock();
+	status.searching = true;
+	status.share_finished = true;
+	status.status_lock.unlock();
+
+	//TODO do we really need this whole for loop?
+	for(worker = 0; worker < worker_cnt; worker++) {
+		//lock the other worker so we can request work from them
+		peer_status = &workers[worker]->status;
+		peer_status->status_lock.lock();
+
+		//if they're done, remember that so we can decide whether to exit
+		if((peer_status->share_finished) && (worker != id)) {
+			workers_finished++;
+		}
+		//skip them if they're us, if they're looking for work themselves,
+		//or if they don't have enough work left to be worth splitting
+		if((worker == id) ||
+		   (peer_status->searching) || (peer_status->div_syn) || (peer_status->row_load < min_lines)) {
+			loads[worker] = 0;
+			peer_status->status_lock.unlock();
+			continue;
+		}
+		//finally, if they're a valid candidate, write down their load
+		loads[worker] = peer_status->row_load;
+		peer_status->status_lock.unlock();
+	}
+	//exit if all other workers are finished
+	if(workers_finished >= worker_cnt - 1) {
+		return false;
+	}
+	//then we look over the loads and pick our candidate
+	for(;;) {
+		//find the worker who has the biggest workload
+		worker = distance(loads, max_element(loads, &loads[worker_cnt]));
+		if(!loads[worker]) break; //best load is 0, so no valid candidates remain
+		peer_status = &workers[worker]->status;
+		peer_status->status_lock.lock();
+		//check that the candidate is still valid.
+		//TODO do we really need to check the first time?
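+		//(note: loads[] is a snapshot from the scan above, so the peer may have
+		//started searching or been divided in the meantime; hence the re-check)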
+		if((peer_status->searching) || (peer_status->div_syn) || (peer_status->row_load < min_lines)) {
+			loads[worker] = 0;
+			peer_status->status_lock.unlock();
+			continue;
+		}
+		ack = unique_lock<mutex>(peer_status->ack_lk);
+		peer_status->div_syn = true;
+		peer_status->status_lock.unlock();
+		peer_status->msg_notify.wait(ack);
+		ack.unlock();
+		if(peer_status->div_error) {
+			loads[worker] = 0;
+			peer_status->status_lock.lock();
+			peer_status->div_error = false;
+			peer_status->div_syn = false;
+			peer_status->status_lock.unlock();
+			continue;
+		}
+
+		divinfo = workers[worker]->divide();
+		peer_status->syn_ack_lk.unlock();
+		peer_status->msg_notify.notify_all();
+		y_min = divinfo.y_min;
+		y_max = divinfo.y_max;
+		x_min = divinfo.x_min;
+		x_max = divinfo.x_max;
+		status.status_lock.lock();
+		status.searching = false;
+		status.status_lock.unlock();
+		break;
+	}
+	return true;
 }
 
 //makes sure no one is asking for work from us
 void mthread::check_work_request() {
+	unique_lock<mutex> syn_ack;
+
+	status.status_lock.lock();
+	status.row_load = y_max - on_y;
+	//check if anyone's asking us to divide
+	if(status.div_syn) {
+		status.div_error = status.row_load <= min_lines;
+		if(status.div_error) {
+			status.ack_lk.unlock();
+			status.msg_notify.notify_all();
+		}
+		else {
+			syn_ack = unique_lock<mutex>(status.syn_ack_lk);
+			status.ack_lk.unlock();
+			status.msg_notify.notify_all();
+			status.msg_notify.wait(syn_ack);
+			status.row_load = y_max - on_y;
+			syn_ack.unlock();
+			//new x/y min/max is adjusted by the other thread, we can continue as normal.
+		}
+	}
+	status.status_lock.unlock();
 }
 
 //renders area
-void mthread::render_area() { //TODO rename
+void mthread::render_area() {
+	uint32_t image_width = image.width();
+	unsigned int iter;
+	complex<double> c, a; //c: the sampled point; a: the iterated value
+	double pixel_value;
+
+	for(on_y = y_min; on_y < y_max; on_y++) {
+		progress++;
+		check_work_request();
+		for(on_x = x_min; on_x < x_max; on_x++) {
+			c = (step * complex<double>(on_x,on_y)) + c_min;
+			a = 0;
+			for(iter = 0; iter < max_iter; iter++) {
+				if(abs(a) >= inf_cutoff) break;
+				a = a*a + c;
+			}
+			if(iter >= max_iter) {
+				iter = 0;
+				vmap[(on_y * image_width) + on_x] = 0;
+			}
+			else {
+				pixel_value = (iter + 1) - (log((log(pow(abs(a), 2.0)) / 2.0) / log(2.0)));
+				vmap[(on_y * image_width) + on_x] = pixel_value;
+				histogram[(int)pixel_value]++;
+			}
+		}
+	}
 }
 
 //alternates states of finding work and rendering
 void mthread::run() {
-	for(;;) {
-	}
-}
-
-
-//TODO move synchronization to another function for extensibility
-void mthread::render() {
-	uint32_t image_width = image.width();
-	unsigned int iter;
-	unsigned int worker, workers_finished;
-	uint32_t loads[worker_cnt];
-	double pixel_value;
-	complex<double> c, a;
-	struct mthread_status *peer_status;
-	struct mthread_divinfo divinfo;
-
-	unique_lock<mutex> ack;
-	unique_lock<mutex> syn_ack;
-
-
-	status.status_lock.lock();
+	status.status_lock.lock(); //TODO move to initializer
 	status.searching = false;
 	status.share_finished = false;
 	status.div_syn = false;
 	status.div_error = false;
 	status.status_lock.unlock();
+	do {
+		render_area();
+	} while (find_work());
 
-	y_min = 0;
-	y_max = image.height();
-
-
-
-	for(;;) {
-		//thread is actively rendering
-		for(on_y = y_min; on_y < y_max; on_y++) {
-			progress++;
-			status.status_lock.lock();
-			status.row_load = y_max - on_y;
-			//check if anyone's asking us to divide
-			if(status.div_syn) {
-
-				status.div_error = status.row_load <= min_lines;
-
-				if(status.div_error) {
-					status.ack_lk.unlock();
-					status.msg_notify.notify_all();
-				}
-				else {
-					syn_ack = unique_lock<mutex>(status.syn_ack_lk);
-					status.ack_lk.unlock();
-					status.msg_notify.notify_all();
-					status.msg_notify.wait(syn_ack);
-					status.row_load = y_max - on_y;
-					syn_ack.unlock();
-					//new x/y min/max is adjusted by other thread, we can continue as normal.
-				}
-			}
-			status.status_lock.unlock();
-
-			for(on_x = x_min; on_x < x_max; on_x++) {
-				c = (step * complex<double>(on_x,on_y)) + c_min;
-				a = 0;
-				for(iter = 0; iter < max_iter; iter++) {
-					if(abs(a) >= inf_cutoff) break;
-					a = a*a + c;
-				}
-				if(iter >= max_iter) {
-					iter = 0;
-					vmap[(on_y * image_width) + on_x] = 0;
-				}
-				else {
-					pixel_value = (iter + 1) - (log((log(pow(abs(a), 2.0)) / 2.0) / log(2.0)));
-					vmap[(on_y * image_width) + on_x] = pixel_value;
-					histogram[(int)pixel_value]++;
-				}
-			}
-		}
-
-
-		//thread is now searching for work
-
-		/** 2022 comment:
-		 * this state should have been moved to a separate function to allow rendering methods to differ
-		 * from inherited mthreads without needing to reimplement searching **/
-		status.status_lock.lock();
-		status.searching = true;
-		status.share_finished = true;
-		status.status_lock.unlock();
-
-		//first we look over all workers to see which are candidates to ask for work
-		while(status.searching) {
-			workers_finished = 0;
-			//TODO do we really need this whole for loop?
-			for(worker = 0; worker < worker_cnt; worker++) {
-				//lock other worker so we can request from them
-				peer_status = &workers[worker]->status;
-				peer_status->status_lock.lock();
-
-				//if they're done, we remember that to see if we exit
-				if((peer_status->share_finished) && (worker != id)) {
-					workers_finished++;
-				}
-				//if they're us, currently looking for work,
-				//or don't have enough work for us to complete, then skip
-
-				if((worker == id) ||
-					(peer_status->searching) || (peer_status->div_syn) || (peer_status->row_load < min_lines)) {
-					loads[worker] = 0;
-					peer_status->status_lock.unlock();
-					continue;
-				}
-				//finally, if they're valid, write them down
-				loads[worker] = peer_status->row_load;
-				peer_status->status_lock.unlock();
-			}
-			//exit if all workers are finished
-			if(workers_finished >= worker_cnt - 1) {
-				return;
-			}
-			//then we look over and pick our candidates
-			for(;;) {
-				//find the worker who has the biggest workload
-				worker = distance(loads, max_element(loads, &loads[worker_cnt]));
-				if(!loads[worker]) break; //best load is 0, so no valid candidates remain
-				peer_status = &workers[worker]->status;
-				peer_status->status_lock.lock();
-				//check to see if candidate is valid.
-				//TODO do we really need to check the first time?
-				if((peer_status->searching) || (peer_status->div_syn) || (peer_status->row_load < min_lines)) {
-					loads[worker] = 0;
-					peer_status->status_lock.unlock();
-					continue;
-				}
-				ack = unique_lock<mutex>(peer_status->ack_lk);
-				peer_status->div_syn = true;
-				peer_status->status_lock.unlock();
-				peer_status->msg_notify.wait(ack);
-				ack.unlock();
-				if(peer_status->div_error) {
-					loads[worker] = 0;
-					peer_status->status_lock.lock();
-					peer_status->div_error = false;
-					peer_status->div_syn = false;
-					peer_status->status_lock.unlock();
-					continue;
-				}
-
-				divinfo = workers[worker]->divide();
-				peer_status->syn_ack_lk.unlock();
-				peer_status->msg_notify.notify_all();
-				y_min = divinfo.y_min;
-				y_max = divinfo.y_max;
-				x_min = divinfo.x_min;
-				x_max = divinfo.x_max;
-				status.status_lock.lock();
-				status.searching = false;
-				status.status_lock.unlock();
-				break;
-			}
-		}
-	}
 }
+
+//TODO move synchronization to another function for extensibility
+
 struct mthread_divinfo mthread::divide() {
 	struct mthread_divinfo ret;
 	ret.x_min = x_min;
diff --git a/mthread.hpp b/mthread.hpp
index f59732c..953d021 100644
--- a/mthread.hpp
+++ b/mthread.hpp
@@ -64,9 +64,11 @@ class mthread {
 
 		int state;
 		struct mthread_divinfo divide();
-		void render(); //TODO
 		void render_area();
+		void run();
+		bool find_work();
+		void check_work_request();
 
 };
diff --git a/mthread_old b/mthread_old
new file mode 100644
index 0000000..9f7df9f
--- /dev/null
+++ b/mthread_old
@@ -0,0 +1,269 @@
+#include "mthread.hpp"
+#include <thread>
+#include <mutex>
+#include <condition_variable>
+#include <complex>
+#include <cmath>
+#include <cstdint>
+#include <atomic>
+#include <algorithm>
+using namespace std;
+
+mthread::mthread(
+	unsigned int x_mn, unsigned int x_mx, complex<double> c_min, complex<double> c_max,
+	unsigned int inf_cutoff, unsigned int max_iter, png& image, double *g_vmap, unsigned int *g_histogram,
+	mthread **worker_list, unsigned int id, unsigned int jobs, atomic<unsigned int>& progress)
+	: x_min_orig(x_mn), x_max_orig(x_mx),
+	c_min(c_min), c_max(c_max),
+	inf_cutoff(inf_cutoff), max_iter(max_iter), image(image), id(id), worker_cnt(jobs), progress(progress){
+
+	workers = worker_list;
+	x_min = x_mn;
+	x_max = x_mx;
+	y_min = 0;
+	y_max = image.height();
+	vmap = g_vmap;
+	histogram = g_histogram;
+	step = (c_max - c_min) / complex<double>(image.height(), image.width());
+	my_thread = NULL;
+
+	status.status_lock.lock();
+	status.searching = false;
+	status.share_finished = false;
+	status.div_syn = false;
+	status.div_error = false;
+	status.status_lock.unlock();
+}
+
+void mthread::dispatch() {
+	if((my_thread) && (my_thread->joinable())) delete my_thread;
+	my_thread = new thread([this] {render();});
+}
+
+
+mthread::~mthread() {
+	if((my_thread) && (my_thread->joinable())) {
+		my_thread->join();
+		delete my_thread;
+	}
+}
+
+
+void mthread::join() {
+	if((my_thread) && (my_thread->joinable())) my_thread->join();
+}
+
+//TODO make final
+//looks for work
+void mthread::find_work() {
+}
+
+//makes sure no one is asking for work from us
+void mthread::check_work_request() {
+	unique_lock<mutex> ack;
+	unique_lock<mutex> syn_ack;
+
+	status.status_lock.lock();
+	status.row_load = y_max - on_y;
+	//check if anyone's asking us to divide
+	if(status.div_syn) {
+		status.div_error = status.row_load <= min_lines;
+		if(status.div_error) {
+			status.ack_lk.unlock();
+			status.msg_notify.notify_all();
+		}
+		else {
+			syn_ack = unique_lock<mutex>(status.syn_ack_lk);
+			status.ack_lk.unlock();
+			status.msg_notify.notify_all();
+			status.msg_notify.wait(syn_ack);
+			status.row_load = y_max - on_y;
+			syn_ack.unlock();
+			//new x/y min/max is adjusted by other thread, we can continue as normal.
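+			//(while we wait on syn_ack, the requesting thread calls divide() on us,
+			//then releases syn_ack_lk and notifies us to resume with the smaller area)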
+		}
+	}
+	status.status_lock.unlock();
+}
+
+//renders area
+void mthread::render_area() { //TODO rename
+}
+
+//alternates states of finding work and rendering
+void mthread::run() {
+	for(;;) {
+	}
+}
+
+
+//TODO move synchronization to another function for extensibility
+void mthread::render() {
+	uint32_t image_width = image.width();
+	unsigned int iter;
+	unsigned int worker, workers_finished;
+	uint32_t loads[worker_cnt];
+	double pixel_value;
+	complex<double> c, a;
+	struct mthread_status *peer_status;
+	struct mthread_divinfo divinfo;
+
+	unique_lock<mutex> ack;
+	unique_lock<mutex> syn_ack;
+
+
+	status.status_lock.lock();
+	status.searching = false;
+	status.share_finished = false;
+	status.div_syn = false;
+	status.div_error = false;
+	status.status_lock.unlock();
+
+
+	y_min = 0;
+	y_max = image.height();
+
+
+
+	for(;;) {
+		//thread is actively rendering
+		for(on_y = y_min; on_y < y_max; on_y++) {
+			progress++;
+			check_work_request();
+			/**
+			status.status_lock.lock();
+			status.row_load = y_max - on_y;
+			//check if anyone's asking us to divide
+			if(status.div_syn) {
+				status.div_error = status.row_load <= min_lines;
+				if(status.div_error) {
+					status.ack_lk.unlock();
+					status.msg_notify.notify_all();
+				}
+				else {
+					syn_ack = unique_lock<mutex>(status.syn_ack_lk);
+					status.ack_lk.unlock();
+					status.msg_notify.notify_all();
+					status.msg_notify.wait(syn_ack);
+					status.row_load = y_max - on_y;
+					syn_ack.unlock();
+					//new x/y min/max is adjusted by other thread, we can continue as normal.
+				}
+			}
+			status.status_lock.unlock();
+			**/
+
+			for(on_x = x_min; on_x < x_max; on_x++) {
+				c = (step * complex<double>(on_x,on_y)) + c_min;
+				a = 0;
+				for(iter = 0; iter < max_iter; iter++) {
+					if(abs(a) >= inf_cutoff) break;
+					a = a*a + c;
+				}
+				if(iter >= max_iter) {
+					iter = 0;
+					vmap[(on_y * image_width) + on_x] = 0;
+				}
+				else {
+					pixel_value = (iter + 1) - (log((log(pow(abs(a), 2.0)) / 2.0) / log(2.0)));
+					vmap[(on_y * image_width) + on_x] = pixel_value;
+					histogram[(int)pixel_value]++;
+				}
+			}
+		}
+
+
+		//thread is now searching for work
+
+		/** 2022 comment:
+		 * this state should have been moved to a separate function to allow rendering methods to differ
+		 * from inherited mthreads without needing to reimplement searching **/
+		status.status_lock.lock();
+		status.searching = true;
+		status.share_finished = true;
+		status.status_lock.unlock();
+
+		//first we look over all workers to see which are candidates to ask for work
+		while(status.searching) {
+			workers_finished = 0;
+			//TODO do we really need this whole for loop?
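+			//(each peer's load is sampled under its own status_lock, so loads[]
+			//is a best-effort snapshot rather than a globally consistent view)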
+			for(worker = 0; worker < worker_cnt; worker++) {
+				//lock other worker so we can request from them
+				peer_status = &workers[worker]->status;
+				peer_status->status_lock.lock();
+
+				//if they're done, we remember that to see if we exit
+				if((peer_status->share_finished) && (worker != id)) {
+					workers_finished++;
+				}
+				//if they're us, currently looking for work,
+				//or don't have enough work for us to complete, then skip
+
+				if((worker == id) ||
+					(peer_status->searching) || (peer_status->div_syn) || (peer_status->row_load < min_lines)) {
+					loads[worker] = 0;
+					peer_status->status_lock.unlock();
+					continue;
+				}
+				//finally, if they're valid, write them down
+				loads[worker] = peer_status->row_load;
+				peer_status->status_lock.unlock();
+			}
+			//exit if all workers are finished
+			if(workers_finished >= worker_cnt - 1) {
+				return;
+			}
+			//then we look over and pick our candidates
+			for(;;) {
+				//find the worker who has the biggest workload
+				worker = distance(loads, max_element(loads, &loads[worker_cnt]));
+				if(!loads[worker]) break; //best load is 0, so no valid candidates remain
+				peer_status = &workers[worker]->status;
+				peer_status->status_lock.lock();
+				//check to see if candidate is valid.
+				//TODO do we really need to check the first time?
+				if((peer_status->searching) || (peer_status->div_syn) || (peer_status->row_load < min_lines)) {
+					loads[worker] = 0;
+					peer_status->status_lock.unlock();
+					continue;
+				}
+				ack = unique_lock<mutex>(peer_status->ack_lk);
+				peer_status->div_syn = true;
+				peer_status->status_lock.unlock();
+				peer_status->msg_notify.wait(ack);
+				ack.unlock();
+				if(peer_status->div_error) {
+					loads[worker] = 0;
+					peer_status->status_lock.lock();
+					peer_status->div_error = false;
+					peer_status->div_syn = false;
+					peer_status->status_lock.unlock();
+					continue;
+				}
+
+				divinfo = workers[worker]->divide();
+				peer_status->syn_ack_lk.unlock();
+				peer_status->msg_notify.notify_all();
+				y_min = divinfo.y_min;
+				y_max = divinfo.y_max;
+				x_min = divinfo.x_min;
+				x_max = divinfo.x_max;
+				status.status_lock.lock();
+				status.searching = false;
+				status.status_lock.unlock();
+				break;
+			}
+		}
+	}
+}
+
+struct mthread_divinfo mthread::divide() {
+	struct mthread_divinfo ret;
+	ret.x_min = x_min;
+	ret.x_max = x_max;
+	ret.y_min = ((y_max - on_y) / 2) + on_y;
+	ret.y_max = y_max;
+	y_min = on_y;
+	y_max = ret.y_min;
+	status.div_syn = false;
+	return ret;
+}