#include "mthread.hpp" #include #include #include #include #include #include #include #include using namespace std; mthread::mthread( unsigned int x_mn, unsigned int x_mx, complex c_min, complex c_max, unsigned int inf_cutoff, unsigned int max_iter, png& image, double *g_vmap, unsigned int *g_histogram, mthread **worker_list, unsigned int id, unsigned int jobs, atomic& progress) : x_min_orig(x_mn), x_max_orig(x_mx), c_min(c_min), c_max(c_max), inf_cutoff(inf_cutoff), max_iter(max_iter), image(image), id(id), worker_cnt(jobs), progress(progress){ workers = worker_list; x_min = x_mn; x_max = x_mx; y_min = 0; y_max = image.height(); vmap = g_vmap; histogram = g_histogram; step = (c_max - c_min) / complex(image.height(), image.width()); my_thread = NULL; status.status_lock.lock(); } void mthread::dispatch() { if((my_thread) && (my_thread->joinable())) delete my_thread; my_thread = new thread([this] {run();}); } mthread::~mthread() { if((my_thread) && (my_thread->joinable())) { my_thread->join(); delete my_thread; } } void mthread::join() { if((my_thread) && (my_thread->joinable())) my_thread->join(); } bool mthread::find_work() { unsigned int worker, workers_finished; uint32_t loads[worker_cnt]; struct mthread_status *peer_status; struct mthread_divinfo divinfo; workers_finished = 0; unique_lock ack; status.status_lock.lock(); status.searching = true; status.share_finished = true; status.status_lock.unlock(); for(worker = 0; worker < worker_cnt; worker++) { //lock other worker so we can request from them peer_status = &workers[worker]->status; peer_status->status_lock.lock(); //if they're done, we remember that to see if we exit if((peer_status->share_finished) && (worker != id)) { workers_finished++; } /** The reason why we have this bit in code twice is to prevent race conditions. * To add the potential canidate worker to our list to be sorted, * we need to make sure it's not currently being divided (true if div_syn is set). * Since we'll need to check the same later * when we actually request to split it's workload, * we might as well narrow it down and check everything else. * **/ if((worker == id) || (peer_status->searching) || (peer_status->div_syn) || (peer_status->row_load < min_lines)) { loads[worker] = 0; peer_status->status_lock.unlock(); continue; } //finally, if they're valid, write them down loads[worker] = peer_status->row_load; peer_status->status_lock.unlock(); } //exit if all workers are finished if(workers_finished >= worker_cnt - 1) { return false; } //then we look over and pick our canidates for(;;) { //find the worker who has the biggest workload worker = distance(loads, max_element(loads, &loads[worker_cnt])); if(!loads[worker]) return true; //we have found a worker; distance is 0 peer_status = &workers[worker]->status; peer_status->status_lock.lock(); //re-check to see if canidate is valid (may have changed since added and sorted). //If you're wondering why we're doing this bit again, see comment before first time. 
        if ((peer_status->searching) || (peer_status->div_syn) ||
            (peer_status->row_load < min_lines)) {
            loads[worker] = 0;
            peer_status->status_lock.unlock();
            continue;
        }
        // Ack: ask the peer to divide
        ack = unique_lock<mutex>(peer_status->ack_lk);
        peer_status->div_syn = true;
        peer_status->status_lock.unlock();
        // When they reset it, they have acknowledged and set the appropriate messages
        peer_status->msg_notify.wait(ack);
        ack.unlock();
        // Their area is too small to divide reliably
        if (peer_status->div_error) {
            loads[worker] = 0;
            peer_status->status_lock.lock();
            peer_status->div_error = false;
            peer_status->div_syn = false;
            peer_status->status_lock.unlock();
            continue;
        }
        // Otherwise, we modify their struct for them, splitting their workload
        divinfo = workers[worker]->divide();
        peer_status->syn_ack_lk.unlock(); // tell them we're done modifying
        peer_status->msg_notify.notify_all();
        // Take the divided region for ourselves
        y_min = divinfo.y_min;
        y_max = divinfo.y_max;
        x_min = divinfo.x_min;
        x_max = divinfo.x_max;
        // Update our status
        status.status_lock.lock();
        status.searching = false;
        status.status_lock.unlock();
        return true;
    }
    return true;
}

// Makes sure no one is asking for work from us
void mthread::check_work_request()
{
    unique_lock<mutex> syn_ack;

    status.status_lock.lock();
    status.row_load = y_max - on_y;
    // Check whether anyone is asking us to divide
    if (status.div_syn) {
        status.div_error = status.row_load <= min_lines;
        if (status.div_error) {
            // Too few rows left to split; report the error and wake the requester
            status.ack_lk.unlock();
            status.msg_notify.notify_all();
        } else {
            syn_ack = unique_lock<mutex>(status.syn_ack_lk);
            status.ack_lk.unlock();
            status.msg_notify.notify_all();
            status.msg_notify.wait(syn_ack);
            status.row_load = y_max - on_y;
            syn_ack.unlock();
            // Our new x/y min/max have been adjusted by the other thread; continue as normal.
        }
    }
    status.status_lock.unlock();
}

// Renders the assigned area
void mthread::render_area()
{
    uint32_t image_width = image.width();
    unsigned int iter;
    complex<double> c, z;
    double pixel_value;

    /** Each pixel is drawn based on whether, and after how many iterations,
     * z escapes to infinity while computing z = z^2 + c.
     **/
    for (on_y = y_min; on_y < y_max; on_y++) {
        progress++;
        check_work_request();
        for (on_x = x_min; on_x < x_max; on_x++) {
            c = (step * complex<double>(on_x, on_y)) + c_min;
            z = 0;
            for (iter = 0; iter < max_iter; iter++) {
                if (abs(z) >= inf_cutoff)
                    break;
                z = z * z + c;
            }
            if (iter >= max_iter) {
                // Never escaped: treat the point as inside the set
                iter = 0;
                vmap[(on_y * image_width) + on_x] = 0;
            } else {
                // Smoothed escape-time value
                pixel_value = (iter + 1) - (log((log(pow(abs(z), 2.0)) / 2.0) / log(2.0)));
                vmap[(on_y * image_width) + on_x] = pixel_value;
                histogram[(int)pixel_value]++;
            }
        }
    }
}

// Alternates between rendering and finding more work
void mthread::run()
{
    // status_lock was locked in the constructor
    status.searching = false;
    status.share_finished = false;
    status.div_syn = false;
    status.div_error = false;
    status.status_lock.unlock();

    do {
        render_area();
    } while (find_work());
}

struct mthread_divinfo mthread::divide()
{
    struct mthread_divinfo ret;
    // Give away the upper half of our remaining rows; keep the lower half.
    ret.x_min = x_min;
    ret.x_max = x_max;
    ret.y_min = ((y_max - on_y) / 2) + on_y;
    ret.y_max = y_max;
    y_min = on_y;
    y_max = ret.y_min;
    status.div_syn = false;
    return ret;
}
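
/* Usage sketch (not part of this translation unit): roughly how a caller might
 * drive a pool of mthread workers, assuming the constructor above and a png
 * class exposing width()/height(). The names jobs, img, vmap and histogram are
 * illustrative, as are the inf_cutoff/max_iter values and the column-wise
 * initial split.
 *
 *     atomic<unsigned int> progress(0);
 *     vector<mthread *> workers(jobs);
 *     for (unsigned int i = 0; i < jobs; i++)
 *         workers[i] = new mthread(i * img.width() / jobs,
 *                                  (i + 1) * img.width() / jobs,
 *                                  c_min, c_max, 4, 1024, img, vmap, histogram,
 *                                  workers.data(), i, jobs, progress);
 *     for (mthread *w : workers) w->dispatch();
 *     for (mthread *w : workers) w->join();
 *     for (mthread *w : workers) delete w;
 */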