fixed readme conflict

This commit is contained in:
Brett Weiland 2023-01-23 19:11:54 -06:00
commit e63ea49825
4 changed files with 170 additions and 141 deletions

View File

@ -1,17 +1,9 @@
# Building
Just run make. Depends on libpng.
For Linux, just run make. Depends on libpng.
# Info
This project was my final for CS200 at JCCC. It uses multiple threads to draw the mandelbrot. When one thread is finished, it will communicate with other threads to split the load; work delegation is the main showcase of this project.
This project was my final for CS200 at JCCC. It uses multiple threads to draw the mandelbrot. When one thread is finished, it will communicate with other threads to split the load; work delegation is the main showcase of this project. Additionally, the class used to render is polymorphic: new rendering methods can be implemented without reimplementing code such as work delegation and synchronization.
# Writeup
For the writeup portion of this project I did last year, please visit [here](https://git.bpcspace.com/brett/Mandelbrot-CS200/src/branch/master/docs/p4.pdf).
Here you will be able to see demos, as well as flowcharts and pseudocode for some of the functionality.
# Problems
Due to the fact I had to complete this project in just a few days, there are a few problems I'd fix if I were to do it again.
The largest issue is that the thread worker class is not extensible. I have rendering in the same function as thread communication; you should not need to reimplement the synchronization aspects of the thread just to change how it renders. I am currently fixing this in another branch.
Additionally, I'm not sure if there was a simpler way to prevent race conditions than to manually create a mutex when modifying multiple variables.
New issue discovered on Jan 23rd: there's some redundant code in the synchronization section. I'm fixing it in the mthread extensibility branch.

Binary file not shown.

View File

@ -26,11 +26,12 @@ mthread::mthread(
histogram = g_histogram;
step = (c_max - c_min) / complex<double>(image.height(), image.width());
my_thread = NULL;
status.status_lock.lock();
}
// Launches (or relaunches) this worker on its own std::thread.
void mthread::dispatch() {
// NOTE(review): deleting a std::thread that is still joinable() calls
// std::terminate(); this guard deletes exactly in that case — presumably
// callers always join() first, but confirm.
if((my_thread) && (my_thread->joinable())) delete my_thread;
// NOTE(review): the two consecutive assignments appear to be diff residue
// (old render() entry point alongside the new run() entry point); as
// written, the first thread object is leaked and immediately overwritten.
my_thread = new thread([this] {render();});
my_thread = new thread([this] {run();});
}
@ -46,151 +47,182 @@ void mthread::join() {
if((my_thread) && (my_thread->joinable())) my_thread->join();
}
void mthread::render() {
uint32_t image_width = image.width();
unsigned int iter;
/** Steals work from a peer once this worker's own area is rendered.
 * Scans all workers for the one with the largest remaining row load,
 * performs the syn/ack division handshake with it, and adopts the
 * returned sub-rectangle as this worker's new bounds.
 * Returns false when every other worker has also finished (nothing left
 * to steal, so the caller can exit); returns true otherwise. **/
bool mthread::find_work() {
unsigned int worker, workers_finished;
uint32_t loads[worker_cnt]; // snapshot of each peer's remaining row count (0 = not a candidate)
double pixel_value; // NOTE(review): unused in this function — looks like leftover from the old render(); confirm
complex<double> c, a; // NOTE(review): unused in this function — likely diff residue; confirm
struct mthread_status *peer_status;
struct mthread_divinfo divinfo;
workers_finished = 0;
unique_lock<mutex> ack;
// Advertise that we are idle and searching so peers skip us as a donor.
status.status_lock.lock();
status.searching = true;
status.share_finished = true;
status.status_lock.unlock();
for(worker = 0; worker < worker_cnt; worker++) {
//lock other worker so we can request from them
peer_status = &workers[worker]->status;
peer_status->status_lock.lock();
//if they're done, we remember that to see if we exit
if((peer_status->share_finished) && (worker != id)) {
workers_finished++;
}
/** The reason why we have this bit in code twice is to prevent race conditions.
 * To add the potential candidate worker to our list to be sorted,
 * we need to make sure it's not currently being divided (true if div_syn is set).
 * Since we'll need to check the same later
 * when we actually request to split its workload,
 * we might as well narrow it down and check everything else.
 * **/
if((worker == id) ||
(peer_status->searching) || (peer_status->div_syn) ||
(peer_status->row_load < min_lines)) {
loads[worker] = 0;
peer_status->status_lock.unlock();
continue;
}
//finally, if they're valid, write them down
loads[worker] = peer_status->row_load;
peer_status->status_lock.unlock();
}
//exit if all workers are finished
if(workers_finished >= worker_cnt - 1) {
return false;
}
//then we look over and pick our candidates
for(;;) {
//find the worker who has the biggest workload
worker = distance(loads, max_element(loads, &loads[worker_cnt]));
// Largest load is 0: no stealable candidate this pass. Return true so
// the caller (run()'s do/while) keeps us alive to search again.
if(!loads[worker]) return true;
peer_status = &workers[worker]->status;
peer_status->status_lock.lock();
//re-check to see if candidate is valid (may have changed since added and sorted).
//If you're wondering why we're doing this bit again, see comment before first time.
if((peer_status->searching) || (peer_status->div_syn) ||
(peer_status->row_load < min_lines)) {
loads[worker] = 0;
peer_status->status_lock.unlock();
continue;
}
//ack: ask peer to divide
ack = unique_lock<mutex>(peer_status->ack_lk);
peer_status->div_syn = true;
peer_status->status_lock.unlock();
//when they reset it, they've acknowledged and have set appropriate messages
peer_status->msg_notify.wait(ack);
ack.unlock();
//area is too small to divide reliably
if(peer_status->div_error) {
loads[worker] = 0;
peer_status->status_lock.lock();
peer_status->div_error = false;
peer_status->div_syn = false;
peer_status->status_lock.unlock();
continue;
}
//otherwise, we modify their struct for them to split their workload
divinfo = workers[worker]->divide();
peer_status->syn_ack_lk.unlock();
//tell them we're done modifying
peer_status->msg_notify.notify_all();
//set our own stuff: adopt the sub-rectangle handed to us
y_min = divinfo.y_min;
y_max = divinfo.y_max;
x_min = divinfo.x_min;
x_max = divinfo.x_max;
//update our status: we have work again
status.status_lock.lock();
status.searching = false;
status.status_lock.unlock();
return true;
}
return true;
}
//makes sure no one is asking for work from us
/** Called once per row from render_area(): publishes our remaining row
 * load and, if a peer has raised div_syn to request a split, runs our
 * side of the ack / syn-ack handshake. On success the peer rewrites our
 * x/y bounds via divide() before notifying us back. **/
void mthread::check_work_request() {
unique_lock<mutex> syn_ack;
status.status_lock.lock();
status.row_load = y_max - on_y; // rows we still have to render
//check if anyone's asking us to divide
if(status.div_syn) {
// too few rows left to split safely — report the error instead
status.div_error = status.row_load <= min_lines;
if(status.div_error) {
status.ack_lk.unlock();
status.msg_notify.notify_all();
}
else {
// ack the request, then block until the requester finishes divide()
syn_ack = unique_lock<mutex>(status.syn_ack_lk);
status.ack_lk.unlock();
status.msg_notify.notify_all();
status.msg_notify.wait(syn_ack);
status.row_load = y_max - on_y; // y_max may have shrunk during divide()
syn_ack.unlock();
//new x/y min/max is adjusted by other thread, we can continue as normal.
}
}
status.status_lock.unlock();
}
//renders area
void mthread::render_area() {
uint32_t image_width = image.width();
unsigned int iter;
complex<double> c, z;
double pixel_value;
/** pixel is drawn based on if/how many itertions it takes to get to infinity
* while computing z^2+c. **/
for(on_y = y_min; on_y < y_max; on_y++) {
progress++;
check_work_request();
for(on_x = x_min; on_x < x_max; on_x++) {
c = (step * complex<double>(on_x,on_y)) + c_min;
z = 0;
for(iter = 0; iter < max_iter; iter++) {
if(abs(z) >= inf_cutoff) break;
z = z*z + c;
}
if(iter >= max_iter) {
iter = 0;
vmap[(on_y * image_width) + on_x] = 0;
}
else {
pixel_value = (iter + 1) - (log((log(pow(abs(z), 2.0)) / 2.0) / log(2.0)));
vmap[(on_y * image_width) + on_x] = pixel_value;
histogram[(int)pixel_value]++;
}
}
}
}
//alternates states of finding work and rendering
/** Thread entry point: initializes the status flags (status_lock is held
 * from the constructor), then alternates render_area() with find_work()
 * until find_work() reports that every worker is finished. **/
void mthread::run() {
//locks in initializer
status.searching = false;
status.share_finished = false;
status.div_syn = false;
status.div_error = false;
status.status_lock.unlock();
do {
render_area();
} while (find_work());
// NOTE(review): everything below appears to be the pre-refactor render()
// body left interleaved by the diff view — it references locals (syn_ack,
// loads, worker, iter, a, c, ...) that run() never declares, so it cannot
// be live code. Kept byte-for-byte; confirm against the repository.
y_min = 0;
y_max = image.height();
for(;;) {
//thread is actively rendering
for(on_y = y_min; on_y < y_max; on_y++) {
progress++;
status.status_lock.lock();
status.row_load = y_max - on_y;
if(status.div_syn) {
status.div_error = status.row_load <= min_lines;
if(status.div_error) {
status.ack_lk.unlock();
status.msg_notify.notify_all();
}
else {
syn_ack = unique_lock<mutex>(status.syn_ack_lk);
status.ack_lk.unlock();
status.msg_notify.notify_all();
status.msg_notify.wait(syn_ack);
status.row_load = y_max - on_y;
syn_ack.unlock();
//new x/y min/max is adjusted by other thread, we can continue as normal.
}
}
status.status_lock.unlock();
for(on_x = x_min; on_x < x_max; on_x++) {
c = (step * complex<double>(on_x,on_y)) + c_min;
a = 0;
for(iter = 0; iter < max_iter; iter++) {
if(abs(a) >= inf_cutoff) break;
a = a*a + c;
}
if(iter >= max_iter) {
iter = 0;
vmap[(on_y * image_width) + on_x] = 0;
}
else {
pixel_value = (iter + 1) - (log((log(pow(abs(a), 2.0)) / 2.0) / log(2.0)));
vmap[(on_y * image_width) + on_x] = pixel_value;
histogram[(int)pixel_value]++;
}
}
}
//thread is now searching for work
/** 2022 comment:
 * this state should have been moved to a separate function to allow rendering methods to differ
 * from inherited mthreads without needing to reimplement searching **/
status.status_lock.lock();
status.searching = true;
status.share_finished = true;
status.status_lock.unlock();
while(status.searching) {
workers_finished = 0;
for(worker = 0; worker < worker_cnt; worker++) {
peer_status = &workers[worker]->status;
peer_status->status_lock.lock();
if((peer_status->share_finished) && (worker != id)) {
workers_finished++;
}
if((worker == id) ||
(peer_status->searching) || (peer_status->div_syn) || (peer_status->row_load < min_lines)) {
loads[worker] = 0;
peer_status->status_lock.unlock();
continue;
}
loads[worker] = peer_status->row_load;
peer_status->status_lock.unlock();
}
if(workers_finished >= worker_cnt - 1) {
return;
}
for(;;) {
worker = distance(loads, max_element(loads, &loads[worker_cnt]));
if(!loads[worker]) break;
peer_status = &workers[worker]->status;
peer_status->status_lock.lock();
if((peer_status->searching) || (peer_status->div_syn) || (peer_status->row_load < min_lines)) {
loads[worker] = 0;
peer_status->status_lock.unlock();
continue;
}
ack = unique_lock<mutex>(peer_status->ack_lk);
peer_status->div_syn = true;
peer_status->status_lock.unlock();
peer_status->msg_notify.wait(ack);
ack.unlock();
if(peer_status->div_error) {
loads[worker] = 0;
peer_status->status_lock.lock();
peer_status->div_error = false;
peer_status->div_syn = false;
peer_status->status_lock.unlock();
continue;
}
divinfo = workers[worker]->divide();
peer_status->syn_ack_lk.unlock();
peer_status->msg_notify.notify_all();
y_min = divinfo.y_min;
y_max = divinfo.y_max;
x_min = divinfo.x_min;
x_max = divinfo.x_max;
status.status_lock.lock();
status.searching = false;
status.status_lock.unlock();
break;
}
}
}
}
struct mthread_divinfo mthread::divide() {
struct mthread_divinfo ret;
ret.x_min = x_min;

View File

@ -33,7 +33,7 @@ class mthread {
std::complex<double> c_min, std::complex<double> c_max,
unsigned int inf_cutoff, unsigned int max_iter, png& image, double *g_vmap, unsigned int *g_histogram,
mthread **worker_list, unsigned int id, unsigned int jobs, std::atomic<uint32_t>& progress);
~mthread();
virtual ~mthread(); //virtual in case a modified renderer needs to allocate memory
void join();
void dispatch();
@ -61,9 +61,14 @@ class mthread {
uint32_t x_min, x_max, y_min, y_max;
uint32_t on_x, on_y;
unsigned int divisions;
void render();
int state;
struct mthread_divinfo divide();
virtual void render_area();
void run();
bool find_work();
void check_work_request();
};