made mthread class polymorphic, finished commenting

This commit is contained in:
Brett Weiland 2023-01-23 18:40:29 -06:00
parent e6cf67d4ed
commit 713d467385
5 changed files with 39 additions and 294 deletions

View File

@ -1,15 +1,9 @@
# Building
Just run make. Depends on libpng.
For Linux, just run make. Depends on libpng.
# Info
This project was my final for CS200 at JCCC. It uses multiple threads to draw the mandelbrot. When one thread is finished, it will communicate with other threads to split the load; work delegation is the main showcase of this project.
This project was my final for CS200 at JCCC. It uses multiple threads to draw the mandelbrot. When one thread is finished, it will communicate with other threads to split the load; work delegation is the main showcase of this project. Additionally, the class used to render is polymorphic, as new rendering methods can be implemented without reimplementing code such as work delegation and synchronization.
# Writeup
For the writeup portion of this project I did last year, please visit [here](https://git.bpcspace.com/brett/Mandelbrot-CS200/src/branch/master/docs/p4.pdf).
Here you will be able to see demos, as well as flowcharts and pseudocode for some of the functionality.
# Problems
Due to the fact I had to complete this project in just a few days, there are a few problems I'd fix if I were to do it again.
The largest issue is that the thread worker class is not extensible. I have rendering in the same function as thread communication; you should not need to reimplement the synchronization aspects of the thread just to change how it renders.
Additionally, I'm not sure if there was a simpler way to prevent race conditions than to manually create a mutex when modifying multiple variables.

Binary file not shown.

View File

@ -26,6 +26,7 @@ mthread::mthread(
histogram = g_histogram;
step = (c_max - c_min) / complex<double>(image.height(), image.width());
my_thread = NULL;
status.status_lock.lock();
}
void mthread::dispatch() {
@ -59,7 +60,6 @@ bool mthread::find_work() {
status.share_finished = true;
status.status_lock.unlock();
//TODO do we really need this whole for loop?
for(worker = 0; worker < worker_cnt; worker++) {
//lock other worker so we can request from them
peer_status = &workers[worker]->status;
@ -69,15 +69,22 @@ bool mthread::find_work() {
if((peer_status->share_finished) && (worker != id)) {
workers_finished++;
}
//if they're us, currently looking for work,
//or don't have enough work for us to complete, then skip
/** The reason why we have this bit in code twice is to prevent race conditions.
* To add the potential canidate worker to our list to be sorted,
* we need to make sure it's not currently being divided (true if div_syn is set).
* Since we'll need to check the same later
* when we actually request to split it's workload,
* we might as well narrow it down and check everything else.
* **/
if((worker == id) ||
(peer_status->searching) || (peer_status->div_syn) || (peer_status->row_load < min_lines)) {
(peer_status->searching) || (peer_status->div_syn) ||
(peer_status->row_load < min_lines)) {
loads[worker] = 0;
peer_status->status_lock.unlock();
continue;
}
//finally, if they're valid, write them down
loads[worker] = peer_status->row_load;
peer_status->status_lock.unlock();
@ -93,18 +100,23 @@ bool mthread::find_work() {
if(!loads[worker]) return true; //we have found a worker; distance is 0
peer_status = &workers[worker]->status;
peer_status->status_lock.lock();
//check to see if canidate is valid.
//TODO do we really need to check the first time?
if((peer_status->searching) || (peer_status->div_syn) || (peer_status->row_load < min_lines)) {
//re-check to see if canidate is valid (may have changed since added and sorted).
//If you're wondering why we're doing this bit again, see comment before first time.
if((peer_status->searching) || (peer_status->div_syn) ||
(peer_status->row_load < min_lines)) {
loads[worker] = 0;
peer_status->status_lock.unlock();
continue;
}
//ack: ask peer to devide
ack = unique_lock<mutex>(peer_status->ack_lk);
peer_status->div_syn = true;
peer_status->status_lock.unlock();
//when they reset it, they've acknowlaged and have set appropriate messages
peer_status->msg_notify.wait(ack);
ack.unlock();
//area is too small to divide reliably
if(peer_status->div_error) {
loads[worker] = 0;
peer_status->status_lock.lock();
@ -114,13 +126,20 @@ bool mthread::find_work() {
continue;
}
//otherwise, we modify their struct for them to split their workload
divinfo = workers[worker]->divide();
peer_status->syn_ack_lk.unlock();
//tell them we're done modifying
peer_status->msg_notify.notify_all();
//set our own stuff
y_min = divinfo.y_min;
y_max = divinfo.y_max;
x_min = divinfo.x_min;
x_max = divinfo.x_max;
//update our status
status.status_lock.lock();
status.searching = false;
status.status_lock.unlock();
@ -159,25 +178,28 @@ void mthread::check_work_request() {
void mthread::render_area() {
uint32_t image_width = image.width();
unsigned int iter;
complex<double> c, a; //TODO comment
complex<double> c, z;
double pixel_value;
/** pixel is drawn based on if/how many itertions it takes to get to infinity
* while computing z^2+c. **/
for(on_y = y_min; on_y < y_max; on_y++) {
progress++;
check_work_request();
for(on_x = x_min; on_x < x_max; on_x++) {
c = (step * complex<double>(on_x,on_y)) + c_min;
a = 0;
z = 0;
for(iter = 0; iter < max_iter; iter++) {
if(abs(a) >= inf_cutoff) break;
a = a*a + c;
if(abs(z) >= inf_cutoff) break;
z = z*z + c;
}
if(iter >= max_iter) {
iter = 0;
vmap[(on_y * image_width) + on_x] = 0;
}
else {
pixel_value = (iter + 1) - (log((log(pow(abs(a), 2.0)) / 2.0) / log(2.0)));
pixel_value = (iter + 1) - (log((log(pow(abs(z), 2.0)) / 2.0) / log(2.0)));
vmap[(on_y * image_width) + on_x] = pixel_value;
histogram[(int)pixel_value]++;
}
@ -187,7 +209,7 @@ void mthread::render_area() {
//alternates states of finding work work and rendering
void mthread::run() {
status.status_lock.lock(); //TODO move to initilizer
//locks in initilizer
status.searching = false;
status.share_finished = false;
status.div_syn = false;
@ -201,8 +223,6 @@ void mthread::run() {
}
//TODO move syncronization to another function for extensibility
struct mthread_divinfo mthread::divide() {
struct mthread_divinfo ret;
ret.x_min = x_min;

View File

@ -33,7 +33,7 @@ class mthread {
std::complex<double> c_min, std::complex<double> c_max,
unsigned int inf_cutoff, unsigned int max_iter, png& image, double *g_vmap, unsigned int *g_histogram,
mthread **worker_list, unsigned int id, unsigned int jobs, std::atomic<uint32_t>& progress);
~mthread();
virtual ~mthread(); //virtual in case a modified renderer needs to allocate memory
void join();
void dispatch();
@ -64,7 +64,7 @@ class mthread {
int state;
struct mthread_divinfo divide();
void render_area();
virtual void render_area();
void run();
bool find_work();

View File

@ -1,269 +0,0 @@
#include "mthread.hpp"
#include <iostream>
#include <complex>
#include <unistd.h>
#include <thread>
#include <chrono>
#include <cmath>
#include <algorithm>
#include <atomic>
using namespace std;
/** Worker thread that renders a horizontal band of the mandelbrot image.
 * @param x_mn, x_mx       initial pixel-column bounds for this worker
 * @param c_min, c_max     complex-plane corners mapped over the whole image
 * @param inf_cutoff       magnitude at which a point is treated as divergent
 * @param max_iter         per-pixel iteration limit
 * @param image            output image (used here only for its dimensions)
 * @param g_vmap           shared per-pixel value buffer (width * height doubles)
 * @param g_histogram      shared histogram of computed pixel values
 * @param worker_list      array of all workers; used for work stealing
 * @param id               this worker's index within worker_list
 * @param jobs             total worker count
 * @param progress         shared row counter for progress reporting
 **/
mthread::mthread(
unsigned int x_mn, unsigned int x_mx, complex<double> c_min, complex<double> c_max,
unsigned int inf_cutoff, unsigned int max_iter, png& image, double *g_vmap, unsigned int *g_histogram,
mthread **worker_list, unsigned int id, unsigned int jobs, atomic<uint32_t>& progress)
: x_min_orig(x_mn), x_max_orig(x_mx),
c_min(c_min), c_max(c_max),
inf_cutoff(inf_cutoff), max_iter(max_iter), image(image), id(id), worker_cnt(jobs), progress(progress){
workers = worker_list;
//start with the full image height; find_work/divide narrow this later
x_min = x_mn;
x_max = x_mx;
y_min = 0;
y_max = image.height();
vmap = g_vmap;
histogram = g_histogram;
//NOTE(review): this is true complex division, not per-axis scaling —
//real/imag components mix. Verify the intended pixel->plane mapping.
step = (c_max - c_min) / complex<double>(image.height(), image.width());
my_thread = NULL; //no OS thread until dispatch() is called
//initialize the shared status flags under the lock so peers never see
//partially-initialized state
status.status_lock.lock();
status.searching = false;
status.share_finished = false;
status.div_syn = false;
status.div_error = false;
status.status_lock.unlock();
}
/** Launches (or re-launches) this worker on a fresh std::thread running render().
 * Any previous thread object is joined before being destroyed: deleting a
 * std::thread that is still joinable calls std::terminate(), which is what the
 * old `if (joinable) delete` did. Deleting unconditionally also fixes the leak
 * of a thread object that had already been joined. **/
void mthread::dispatch() {
if(my_thread) {
if(my_thread->joinable()) my_thread->join();
delete my_thread;
}
my_thread = new thread([this] {render();});
}
/** Joins the underlying thread (if still running) and frees the thread object.
 * The delete is performed whenever my_thread is non-null — the previous code
 * only deleted inside the joinable branch, leaking the std::thread object when
 * the thread had already been joined via join(). **/
mthread::~mthread() {
if(my_thread) {
if(my_thread->joinable()) my_thread->join();
delete my_thread;
}
}
/** Blocks until this worker's thread finishes.
 * Safe to call when the worker was never dispatched or was already joined. **/
void mthread::join() {
if(!my_thread) return;          //never dispatched
if(!my_thread->joinable()) return; //already joined
my_thread->join();
}
//TODO make final
//looks for work
/** Stub: the work-stealing search has not been factored out of render() yet;
 * see the "searching" state inside render() for the actual implementation. **/
void mthread::find_work() {
}
//makes sure no one is asking for work from us
/** Services a pending divide request from a peer (the syn/ack handshake).
 * Called once per rendered row. Under status_lock:
 *   - publishes how many rows we still have (row_load),
 *   - if a peer set div_syn, either reports div_error (too few rows left)
 *     or blocks on msg_notify until the peer has called divide() on us and
 *     adjusted our x/y bounds.
 * Statement order is load-bearing: ack_lk must be released before notifying,
 * and syn_ack must be held before the notify so the peer's wake-up can't be
 * missed. **/
void mthread::check_work_request() {
unique_lock<mutex> ack; //NOTE(review): unused here — ack_lk is unlocked directly below
unique_lock<mutex> syn_ack;
status.status_lock.lock();
status.row_load = y_max - on_y; //rows remaining in our band
//check if anyone's asking us to divide
if(status.div_syn) {
status.div_error = status.row_load <= min_lines; //too small to split?
if(status.div_error) {
//refuse: release the requester (it sees div_error set)
status.ack_lk.unlock();
status.msg_notify.notify_all();
}
else {
//accept: release ack, then wait for the requester to finish divide()
syn_ack = unique_lock<mutex>(status.syn_ack_lk);
status.ack_lk.unlock();
status.msg_notify.notify_all();
status.msg_notify.wait(syn_ack);
status.row_load = y_max - on_y; //bounds were shrunk by the requester
syn_ack.unlock();
//new x/y min/max is adjusted by other thread, we can continue as normal.
}
}
status.status_lock.unlock();
}
//renders area
/** Stub: the per-pixel rendering loop still lives inside render();
 * it has not been extracted into this method yet. **/
void mthread::render_area() { //TODO rename
}
//alternates states of finding work and rendering
/** Stub: intended to become the top-level state machine alternating between
 * rendering and work-finding. Currently spins forever doing nothing —
 * render() is still the real thread entry point (see dispatch()). **/
void mthread::run() {
for(;;) {
}
}
//TODO move synchronization to another function for extensibility
/** Thread entry point: alternates between rendering this worker's band and
 * stealing work from peers until every worker reports share_finished.
 *
 * Rendering: classic escape-time iteration z = z*z + c per pixel; pixels that
 * never escape get value 0, escapees get a smoothed iteration count written
 * to vmap and tallied in histogram.
 *
 * Searching: scans all peers' status (under their status_lock) to build a
 * load table, then repeatedly asks the most-loaded candidate to divide via
 * the div_syn / ack / syn_ack handshake (served by the peer in
 * check_work_request(), with the actual split done here by calling the
 * peer's divide()). **/
void mthread::render() {
uint32_t image_width = image.width();
unsigned int iter;
unsigned int worker, workers_finished;
uint32_t loads[worker_cnt]; //NOTE(review): VLA — non-standard C++
double pixel_value;
complex<double> c, a; //c: plane coordinate of the pixel; a: iterate z
struct mthread_status *peer_status;
struct mthread_divinfo divinfo;
unique_lock<mutex> ack;
unique_lock<mutex> syn_ack;
//reset shared flags under the lock before peers can observe us
status.status_lock.lock();
status.searching = false;
status.share_finished = false;
status.div_syn = false;
status.div_error = false;
status.status_lock.unlock();
y_min = 0;
y_max = image.height();
for(;;) {
//thread is actively rendering
for(on_y = y_min; on_y < y_max; on_y++) {
progress++;
check_work_request(); //service any pending divide request once per row
/**
status.status_lock.lock();
status.row_load = y_max - on_y;
//check if anyone's asking us to divide
if(status.div_syn) {
status.div_error = status.row_load <= min_lines;
if(status.div_error) {
status.ack_lk.unlock();
status.msg_notify.notify_all();
}
else {
syn_ack = unique_lock<mutex>(status.syn_ack_lk);
status.ack_lk.unlock();
status.msg_notify.notify_all();
status.msg_notify.wait(syn_ack);
status.row_load = y_max - on_y;
syn_ack.unlock();
//new x/y min/max is ajusted by other thread, we can continue as normal.
}
}
status.status_lock.unlock();
**/
for(on_x = x_min; on_x < x_max; on_x++) {
//NOTE(review): complex multiply mixes axes; verify pixel->plane mapping
c = (step * complex<double>(on_x,on_y)) + c_min;
a = 0;
for(iter = 0; iter < max_iter; iter++) {
if(abs(a) >= inf_cutoff) break;
a = a*a + c;
}
if(iter >= max_iter) {
//never escaped: treat as inside the set
iter = 0;
vmap[(on_y * image_width) + on_x] = 0;
}
else {
//smoothed (fractional) escape count for continuous coloring
pixel_value = (iter + 1) - (log((log(pow(abs(a), 2.0)) / 2.0) / log(2.0)));
vmap[(on_y * image_width) + on_x] = pixel_value;
histogram[(int)pixel_value]++;
}
}
}
//thread is now searching for work
/** 2022 comment:
* this state should have been moved to a seperate function to allow rendering methods to differ
* from inherited mthreads without needing to reimpliment searching **/
status.status_lock.lock();
status.searching = true;
status.share_finished = true;
status.status_lock.unlock();
//first we look over all workers to see which are candidates to ask for work
while(status.searching) {
workers_finished = 0;
//TODO do we really need this whole for loop?
for(worker = 0; worker < worker_cnt; worker++) {
//lock other worker so we can request from them
peer_status = &workers[worker]->status;
peer_status->status_lock.lock();
//if they're done, we remember that to see if we exit
if((peer_status->share_finished) && (worker != id)) {
workers_finished++;
}
//skip ourselves, peers that are themselves searching or mid-divide,
//and peers without enough rows left to be worth splitting
if((worker == id) ||
(peer_status->searching) || (peer_status->div_syn) || (peer_status->row_load < min_lines)) {
loads[worker] = 0;
peer_status->status_lock.unlock();
continue;
}
//finally, if they're valid, write them down
loads[worker] = peer_status->row_load;
peer_status->status_lock.unlock();
}
//exit if all workers are finished
if(workers_finished >= worker_cnt - 1) {
return;
}
//then we look over and pick our candidates
for(;;) {
//find the worker who has the biggest workload
worker = distance(loads, max_element(loads, &loads[worker_cnt]));
if(!loads[worker]) break; //no candidate has any load left; rescan from the top
peer_status = &workers[worker]->status;
peer_status->status_lock.lock();
//re-check candidate validity: state may have changed since the scan above
//TODO do we really need to check the first time?
if((peer_status->searching) || (peer_status->div_syn) || (peer_status->row_load < min_lines)) {
loads[worker] = 0;
peer_status->status_lock.unlock();
continue;
}
//syn: ask the peer to divide, then wait for its ack in check_work_request()
ack = unique_lock<mutex>(peer_status->ack_lk);
peer_status->div_syn = true;
peer_status->status_lock.unlock();
peer_status->msg_notify.wait(ack);
ack.unlock();
//peer declined: its remaining area is too small to split
if(peer_status->div_error) {
loads[worker] = 0;
peer_status->status_lock.lock();
peer_status->div_error = false;
peer_status->div_syn = false;
peer_status->status_lock.unlock();
continue;
}
//we run the peer's divide() ourselves while it blocks, then release it
divinfo = workers[worker]->divide();
peer_status->syn_ack_lk.unlock();
peer_status->msg_notify.notify_all();
//adopt the half of the peer's band that divide() handed to us
y_min = divinfo.y_min;
y_max = divinfo.y_max;
x_min = divinfo.x_min;
x_max = divinfo.x_max;
status.status_lock.lock();
status.searching = false;
status.status_lock.unlock();
break;
}
}
}
}
/** Splits this worker's remaining rows in half and returns the far half
 * (from the midpoint of [on_y, y_max) to y_max) for the requester to render;
 * this worker keeps [on_y, midpoint).
 *
 * NOTE(review): despite being a member of this worker, this runs on the
 * REQUESTING thread (see render()) while this worker blocks in
 * check_work_request(); safety relies on the div_syn handshake — verify
 * before reusing outside that protocol. Clearing div_syn last re-opens this
 * worker to new divide requests. **/
struct mthread_divinfo mthread::divide() {
struct mthread_divinfo ret;
ret.x_min = x_min;
ret.x_max = x_max;
ret.y_min = ((y_max - on_y) / 2) + on_y; //midpoint of the unrendered rows
ret.y_max = y_max;
y_min = on_y;       //we keep the near half...
y_max = ret.y_min;  //...up to the midpoint
status.div_syn = false;
return ret;
}