summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorBrett Weiland <brett_weiland@bpcspace.com>2023-01-23 14:43:30 -0600
committerBrett Weiland <brett_weiland@bpcspace.com>2023-01-23 14:43:30 -0600
commit3b97e627caa6883391bd1e9f3c31172e10bf7404 (patch)
tree534962ac997ec72434f964129d4ba17ad792ce95
parent61bd7df99d32b8c489383568afc53117ab1fa81f (diff)
finished functions to achieve polymorphism
-rw-r--r--docs/p4.pdfbin5197678 -> 5197666 bytes
-rw-r--r--mthread.cpp292
-rw-r--r--mthread.hpp4
-rw-r--r--mthread_old269
4 files changed, 407 insertions, 158 deletions
diff --git a/docs/p4.pdf b/docs/p4.pdf
index 86c225c..4b0bff8 100644
--- a/docs/p4.pdf
+++ b/docs/p4.pdf
Binary files differ
diff --git a/mthread.cpp b/mthread.cpp
index 269155d..4af25ff 100644
--- a/mthread.cpp
+++ b/mthread.cpp
@@ -30,7 +30,7 @@ mthread::mthread(
void mthread::dispatch() {
if((my_thread) && (my_thread->joinable())) delete my_thread;
- my_thread = new thread([this] {render();});
+ my_thread = new thread([this] {run();});
}
@@ -46,185 +46,163 @@ void mthread::join() {
if((my_thread) && (my_thread->joinable())) my_thread->join();
}
-//TODO make final
-//looks for work
-void mthread::find_work() {
-}
-
-//makes sure no one is asking for work from us
-void mthread::check_work_request() {
-}
-
-//renders area
-void mthread::render_area() { //TODO rename
-}
-
-//alternates states of finding work work and rendering
-void mthread::run() {
- for(;;) {
- }
-}
-
-
-//TODO move syncronization to another function for extensibility
-void mthread::render() {
- uint32_t image_width = image.width();
- unsigned int iter;
+bool mthread::find_work() {
unsigned int worker, workers_finished;
uint32_t loads[worker_cnt];
- double pixel_value;
- complex<double> c, a;
struct mthread_status *peer_status;
struct mthread_divinfo divinfo;
-
+ workers_finished = 0;
unique_lock<mutex> ack;
- unique_lock<mutex> syn_ack;
-
- status.status_lock.lock();
- status.searching = false;
- status.share_finished = false;
- status.div_syn = false;
- status.div_error = false;
+ status.status_lock.lock();
+ status.searching = true;
+ status.share_finished = true;
status.status_lock.unlock();
+ //TODO do we really need this whole for loop?
+ for(worker = 0; worker < worker_cnt; worker++) {
+ //lock other worker so we can request from them
+ peer_status = &workers[worker]->status;
+ peer_status->status_lock.lock();
- y_min = 0;
- y_max = image.height();
+ //if they're done, we remember that to see if we exit
+ if((peer_status->share_finished) && (worker != id)) {
+ workers_finished++;
+ }
+ //if they're us, currently looking for work,
+ //or don't have enough work for us to complete, then skip
+
+ if((worker == id) ||
+ (peer_status->searching) || (peer_status->div_syn) || (peer_status->row_load < min_lines)) {
+ loads[worker] = 0;
+ peer_status->status_lock.unlock();
+ continue;
+ }
+ //finally, if they're valid, write them down
+ loads[worker] = peer_status->row_load;
+ peer_status->status_lock.unlock();
+ }
+ //exit if all workers are finished
+ if(workers_finished >= worker_cnt - 1) {
+ return false;
+ }
+  //then we look over and pick our candidates
+ for(;;) {
+ //find the worker who has the biggest workload
+ worker = distance(loads, max_element(loads, &loads[worker_cnt]));
+ if(!loads[worker]) break; //we have found a worker; distance is 0
+ peer_status = &workers[worker]->status;
+ peer_status->status_lock.lock();
+    //check to see if candidate is valid.
+ //TODO do we really need to check the first time?
+ if((peer_status->searching) || (peer_status->div_syn) || (peer_status->row_load < min_lines)) {
+ loads[worker] = 0;
+ peer_status->status_lock.unlock();
+ continue;
+ }
+ ack = unique_lock<mutex>(peer_status->ack_lk);
+ peer_status->div_syn = true;
+ peer_status->status_lock.unlock();
+ peer_status->msg_notify.wait(ack);
+ ack.unlock();
+ if(peer_status->div_error) {
+ loads[worker] = 0;
+ peer_status->status_lock.lock();
+ peer_status->div_error = false;
+ peer_status->div_syn = false;
+ peer_status->status_lock.unlock();
+ continue;
+ }
+ divinfo = workers[worker]->divide();
+ peer_status->syn_ack_lk.unlock();
+ peer_status->msg_notify.notify_all();
+ y_min = divinfo.y_min;
+ y_max = divinfo.y_max;
+ x_min = divinfo.x_min;
+ x_max = divinfo.x_max;
+ status.status_lock.lock();
+ status.searching = false;
+ status.status_lock.unlock();
+ break;
+ }
+ return true;
+}
+//makes sure no one is asking for work from us
+void mthread::check_work_request() {
+ unique_lock<mutex> syn_ack;
- for(;;) {
- //thread is actively rendering
- for(on_y = y_min; on_y < y_max; on_y++) {
- progress++;
- status.status_lock.lock();
+ status.status_lock.lock();
+ status.row_load = y_max - on_y;
+ //check if anyone's asking us to divide
+ if(status.div_syn) {
+ status.div_error = status.row_load <= min_lines;
+ if(status.div_error) {
+ status.ack_lk.unlock();
+ status.msg_notify.notify_all();
+ }
+ else {
+ syn_ack = unique_lock<mutex>(status.syn_ack_lk);
+ status.ack_lk.unlock();
+ status.msg_notify.notify_all();
+ status.msg_notify.wait(syn_ack);
status.row_load = y_max - on_y;
- //check if anyone's asking us to divide
- if(status.div_syn) {
-
- status.div_error = status.row_load <= min_lines;
-
- if(status.div_error) {
- status.ack_lk.unlock();
- status.msg_notify.notify_all();
- }
- else {
- syn_ack = unique_lock<mutex>(status.syn_ack_lk);
- status.ack_lk.unlock();
- status.msg_notify.notify_all();
- status.msg_notify.wait(syn_ack);
- status.row_load = y_max - on_y;
- syn_ack.unlock();
- //new x/y min/max is ajusted by other thread, we can continue as normal.
- }
- }
- status.status_lock.unlock();
-
- for(on_x = x_min; on_x < x_max; on_x++) {
- c = (step * complex<double>(on_x,on_y)) + c_min;
- a = 0;
- for(iter = 0; iter < max_iter; iter++) {
- if(abs(a) >= inf_cutoff) break;
- a = a*a + c;
- }
- if(iter >= max_iter) {
- iter = 0;
- vmap[(on_y * image_width) + on_x] = 0;
- }
- else {
- pixel_value = (iter + 1) - (log((log(pow(abs(a), 2.0)) / 2.0) / log(2.0)));
- vmap[(on_y * image_width) + on_x] = pixel_value;
- histogram[(int)pixel_value]++;
- }
- }
+ syn_ack.unlock();
+      //new x/y min/max is adjusted by other thread, we can continue as normal.
}
+ }
+ status.status_lock.unlock();
+}
+//renders area
+void mthread::render_area() {
+ uint32_t image_width = image.width();
+ unsigned int iter;
+ complex<double> c, a; //TODO comment
+ double pixel_value;
- //thread is now searching for work
-
- /** 2022 comment:
- * this state should have been moved to a seperate function to allow rendering methods to differ
- * from inherited mthreads without needing to reimpliment searching **/
- status.status_lock.lock();
- status.searching = true;
- status.share_finished = true;
- status.status_lock.unlock();
-
- //first we look over all workers to see which are canidates to ask for work
- while(status.searching) {
- workers_finished = 0;
- //TODO do we really need this whole for loop?
- for(worker = 0; worker < worker_cnt; worker++) {
- //lock other worker so we can request from them
- peer_status = &workers[worker]->status;
- peer_status->status_lock.lock();
-
- //if they're done, we remember that to see if we exit
- if((peer_status->share_finished) && (worker != id)) {
- workers_finished++;
- }
- //if they're us, currently looking for work,
- //or don't have enough work for us to complete, then skip
-
- if((worker == id) ||
- (peer_status->searching) || (peer_status->div_syn) || (peer_status->row_load < min_lines)) {
- loads[worker] = 0;
- peer_status->status_lock.unlock();
- continue;
- }
- //finally, if they're valid, write them down
- loads[worker] = peer_status->row_load;
- peer_status->status_lock.unlock();
+ for(on_y = y_min; on_y < y_max; on_y++) {
+ progress++;
+ check_work_request();
+ for(on_x = x_min; on_x < x_max; on_x++) {
+ c = (step * complex<double>(on_x,on_y)) + c_min;
+ a = 0;
+ for(iter = 0; iter < max_iter; iter++) {
+ if(abs(a) >= inf_cutoff) break;
+ a = a*a + c;
}
- //exit if all workers are finished
- if(workers_finished >= worker_cnt - 1) {
- return;
+ if(iter >= max_iter) {
+ iter = 0;
+ vmap[(on_y * image_width) + on_x] = 0;
}
- //then we look over and pick our canidates
- for(;;) {
- //find the worker who has the biggest workload
- worker = distance(loads, max_element(loads, &loads[worker_cnt]));
- if(!loads[worker]) break; //we have found a worker; distance is 0
- peer_status = &workers[worker]->status;
- peer_status->status_lock.lock();
- //check to see if canidate is valid.
- //TODO do we really need to check the first time?
- if((peer_status->searching) || (peer_status->div_syn) || (peer_status->row_load < min_lines)) {
- loads[worker] = 0;
- peer_status->status_lock.unlock();
- continue;
- }
- ack = unique_lock<mutex>(peer_status->ack_lk);
- peer_status->div_syn = true;
- peer_status->status_lock.unlock();
- peer_status->msg_notify.wait(ack);
- ack.unlock();
- if(peer_status->div_error) {
- loads[worker] = 0;
- peer_status->status_lock.lock();
- peer_status->div_error = false;
- peer_status->div_syn = false;
- peer_status->status_lock.unlock();
- continue;
- }
-
- divinfo = workers[worker]->divide();
- peer_status->syn_ack_lk.unlock();
- peer_status->msg_notify.notify_all();
- y_min = divinfo.y_min;
- y_max = divinfo.y_max;
- x_min = divinfo.x_min;
- x_max = divinfo.x_max;
- status.status_lock.lock();
- status.searching = false;
- status.status_lock.unlock();
- break;
+ else {
+ pixel_value = (iter + 1) - (log((log(pow(abs(a), 2.0)) / 2.0) / log(2.0)));
+ vmap[(on_y * image_width) + on_x] = pixel_value;
+ histogram[(int)pixel_value]++;
}
}
}
}
+//alternates states of finding work and rendering
+void mthread::run() {
+ status.status_lock.lock(); //TODO move to initilizer
+ status.searching = false;
+ status.share_finished = false;
+ status.div_syn = false;
+ status.div_error = false;
+ status.status_lock.unlock();
+
+ do {
+ render_area();
+ } while (find_work());
+
+}
+
+
+//TODO move synchronization to another function for extensibility
+
struct mthread_divinfo mthread::divide() {
struct mthread_divinfo ret;
ret.x_min = x_min;
diff --git a/mthread.hpp b/mthread.hpp
index f59732c..953d021 100644
--- a/mthread.hpp
+++ b/mthread.hpp
@@ -64,9 +64,11 @@ class mthread {
int state;
struct mthread_divinfo divide();
- void render(); //TODO
void render_area();
+
void run();
+ bool find_work();
+ void check_work_request();
};
diff --git a/mthread_old b/mthread_old
new file mode 100644
index 0000000..9f7df9f
--- /dev/null
+++ b/mthread_old
@@ -0,0 +1,269 @@
+#include "mthread.hpp"
+#include <iostream>
+#include <complex>
+#include <unistd.h>
+#include <thread>
+#include <chrono>
+#include <cmath>
+#include <algorithm>
+#include <atomic>
+using namespace std;
+
+mthread::mthread(
+ unsigned int x_mn, unsigned int x_mx, complex<double> c_min, complex<double> c_max,
+ unsigned int inf_cutoff, unsigned int max_iter, png& image, double *g_vmap, unsigned int *g_histogram,
+ mthread **worker_list, unsigned int id, unsigned int jobs, atomic<uint32_t>& progress)
+ : x_min_orig(x_mn), x_max_orig(x_mx),
+ c_min(c_min), c_max(c_max),
+ inf_cutoff(inf_cutoff), max_iter(max_iter), image(image), id(id), worker_cnt(jobs), progress(progress){
+
+ workers = worker_list;
+ x_min = x_mn;
+ x_max = x_mx;
+ y_min = 0;
+ y_max = image.height();
+ vmap = g_vmap;
+ histogram = g_histogram;
+ step = (c_max - c_min) / complex<double>(image.height(), image.width());
+ my_thread = NULL;
+
+status.status_lock.lock();
+ status.searching = false;
+ status.share_finished = false;
+ status.div_syn = false;
+ status.div_error = false;
+ status.status_lock.unlock();
+}
+
+void mthread::dispatch() {
+ if((my_thread) && (my_thread->joinable())) delete my_thread;
+ my_thread = new thread([this] {render();});
+}
+
+
+mthread::~mthread() {
+ if((my_thread) && (my_thread->joinable())) {
+ my_thread->join();
+ delete my_thread;
+ }
+}
+
+
+void mthread::join() {
+ if((my_thread) && (my_thread->joinable())) my_thread->join();
+}
+
+//TODO make final
+//looks for work
+void mthread::find_work() {
+}
+
+//makes sure no one is asking for work from us
+void mthread::check_work_request() {
+ unique_lock<mutex> ack;
+ unique_lock<mutex> syn_ack;
+
+ status.status_lock.lock();
+ status.row_load = y_max - on_y;
+ //check if anyone's asking us to divide
+ if(status.div_syn) {
+ status.div_error = status.row_load <= min_lines;
+ if(status.div_error) {
+ status.ack_lk.unlock();
+ status.msg_notify.notify_all();
+ }
+ else {
+ syn_ack = unique_lock<mutex>(status.syn_ack_lk);
+ status.ack_lk.unlock();
+ status.msg_notify.notify_all();
+ status.msg_notify.wait(syn_ack);
+ status.row_load = y_max - on_y;
+ syn_ack.unlock();
+      //new x/y min/max is adjusted by other thread, we can continue as normal.
+ }
+ }
+ status.status_lock.unlock();
+}
+
+//renders area
+void mthread::render_area() { //TODO rename
+}
+
+//alternates states of finding work and rendering
+void mthread::run() {
+ for(;;) {
+ }
+}
+
+
+//TODO move synchronization to another function for extensibility
+void mthread::render() {
+ uint32_t image_width = image.width();
+ unsigned int iter;
+ unsigned int worker, workers_finished;
+ uint32_t loads[worker_cnt];
+ double pixel_value;
+ complex<double> c, a;
+ struct mthread_status *peer_status;
+ struct mthread_divinfo divinfo;
+
+ unique_lock<mutex> ack;
+ unique_lock<mutex> syn_ack;
+
+
+ status.status_lock.lock();
+ status.searching = false;
+ status.share_finished = false;
+ status.div_syn = false;
+ status.div_error = false;
+ status.status_lock.unlock();
+
+
+ y_min = 0;
+ y_max = image.height();
+
+
+
+ for(;;) {
+ //thread is actively rendering
+ for(on_y = y_min; on_y < y_max; on_y++) {
+ progress++;
+ check_work_request();
+ /**
+ status.status_lock.lock();
+ status.row_load = y_max - on_y;
+ //check if anyone's asking us to divide
+ if(status.div_syn) {
+ status.div_error = status.row_load <= min_lines;
+ if(status.div_error) {
+ status.ack_lk.unlock();
+ status.msg_notify.notify_all();
+ }
+ else {
+ syn_ack = unique_lock<mutex>(status.syn_ack_lk);
+ status.ack_lk.unlock();
+ status.msg_notify.notify_all();
+ status.msg_notify.wait(syn_ack);
+ status.row_load = y_max - on_y;
+ syn_ack.unlock();
+          //new x/y min/max is adjusted by other thread, we can continue as normal.
+ }
+ }
+ status.status_lock.unlock();
+ **/
+
+ for(on_x = x_min; on_x < x_max; on_x++) {
+ c = (step * complex<double>(on_x,on_y)) + c_min;
+ a = 0;
+ for(iter = 0; iter < max_iter; iter++) {
+ if(abs(a) >= inf_cutoff) break;
+ a = a*a + c;
+ }
+ if(iter >= max_iter) {
+ iter = 0;
+ vmap[(on_y * image_width) + on_x] = 0;
+ }
+ else {
+ pixel_value = (iter + 1) - (log((log(pow(abs(a), 2.0)) / 2.0) / log(2.0)));
+ vmap[(on_y * image_width) + on_x] = pixel_value;
+ histogram[(int)pixel_value]++;
+ }
+ }
+ }
+
+
+ //thread is now searching for work
+
+ /** 2022 comment:
+   * this state should have been moved to a separate function to allow rendering methods to differ
+   * from inherited mthreads without needing to reimplement searching **/
+ status.status_lock.lock();
+ status.searching = true;
+ status.share_finished = true;
+ status.status_lock.unlock();
+
+  //first we look over all workers to see which are candidates to ask for work
+ while(status.searching) {
+ workers_finished = 0;
+ //TODO do we really need this whole for loop?
+ for(worker = 0; worker < worker_cnt; worker++) {
+ //lock other worker so we can request from them
+ peer_status = &workers[worker]->status;
+ peer_status->status_lock.lock();
+
+ //if they're done, we remember that to see if we exit
+ if((peer_status->share_finished) && (worker != id)) {
+ workers_finished++;
+ }
+ //if they're us, currently looking for work,
+ //or don't have enough work for us to complete, then skip
+
+ if((worker == id) ||
+ (peer_status->searching) || (peer_status->div_syn) || (peer_status->row_load < min_lines)) {
+ loads[worker] = 0;
+ peer_status->status_lock.unlock();
+ continue;
+ }
+ //finally, if they're valid, write them down
+ loads[worker] = peer_status->row_load;
+ peer_status->status_lock.unlock();
+ }
+ //exit if all workers are finished
+ if(workers_finished >= worker_cnt - 1) {
+ return;
+ }
+    //then we look over and pick our candidates
+ for(;;) {
+ //find the worker who has the biggest workload
+ worker = distance(loads, max_element(loads, &loads[worker_cnt]));
+ if(!loads[worker]) break; //we have found a worker; distance is 0
+ peer_status = &workers[worker]->status;
+ peer_status->status_lock.lock();
+      //check to see if candidate is valid.
+ //TODO do we really need to check the first time?
+ if((peer_status->searching) || (peer_status->div_syn) || (peer_status->row_load < min_lines)) {
+ loads[worker] = 0;
+ peer_status->status_lock.unlock();
+ continue;
+ }
+ ack = unique_lock<mutex>(peer_status->ack_lk);
+ peer_status->div_syn = true;
+ peer_status->status_lock.unlock();
+ peer_status->msg_notify.wait(ack);
+ ack.unlock();
+ if(peer_status->div_error) {
+ loads[worker] = 0;
+ peer_status->status_lock.lock();
+ peer_status->div_error = false;
+ peer_status->div_syn = false;
+ peer_status->status_lock.unlock();
+ continue;
+ }
+
+ divinfo = workers[worker]->divide();
+ peer_status->syn_ack_lk.unlock();
+ peer_status->msg_notify.notify_all();
+ y_min = divinfo.y_min;
+ y_max = divinfo.y_max;
+ x_min = divinfo.x_min;
+ x_max = divinfo.x_max;
+ status.status_lock.lock();
+ status.searching = false;
+ status.status_lock.unlock();
+ break;
+ }
+ }
+ }
+}
+
+struct mthread_divinfo mthread::divide() {
+ struct mthread_divinfo ret;
+ ret.x_min = x_min;
+ ret.x_max = x_max;
+ ret.y_min = ((y_max - on_y) / 2) + on_y;
+ ret.y_max = y_max;
+ y_min = on_y;
+ y_max = ret.y_min;
+ status.div_syn = false;
+ return ret;
+}