Diffstat (limited to 'mthread.cpp')
-rw-r--r--  mthread.cpp  204
1 file changed, 204 insertions(+), 0 deletions(-)
diff --git a/mthread.cpp b/mthread.cpp
new file mode 100644
index 0000000..796055e
--- /dev/null
+++ b/mthread.cpp
@@ -0,0 +1,204 @@
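+//mthread: one worker thread of a multi-threaded Mandelbrot renderer.
+//each worker renders a band of the image, then steals rows from the
+//busiest peer via a SYN / ACK / SYN-ACK handshake built on mutexes
+//and condition variables.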
+#include "mthread.hpp"
+#include <iostream>
+#include <complex>
+#include <unistd.h>
+#include <thread>
+#include <chrono>
+#include <cmath>
+#include <algorithm>
+#include <atomic>
+#include <vector>
+using namespace std;
+
+mthread::mthread(
+ unsigned int x_mn, unsigned int x_mx, complex<double> c_min, complex<double> c_max,
+ unsigned int inf_cutoff, unsigned int max_iter, png& image, double *g_vmap, unsigned int *g_histogram,
+ mthread **worker_list, unsigned int id, unsigned int jobs, atomic<uint32_t>& progress)
+ : x_min_orig(x_mn), x_max_orig(x_mx),
+ c_min(c_min), c_max(c_max),
+ inf_cutoff(inf_cutoff), max_iter(max_iter), image(image), id(id), worker_cnt(jobs), progress(progress){
+
+ workers = worker_list;
+ x_min = x_mn;
+ x_max = x_mx;
+ y_min = 0;
+ y_max = image.height();
+ vmap = g_vmap;
+ histogram = g_histogram;
+ //per-axis pixel size: real step per column, imaginary step per row
+ //(component-wise; plain complex division would rotate the mapping)
+ step = complex<double>((c_max.real() - c_min.real()) / image.width(),
+  (c_max.imag() - c_min.imag()) / image.height());
+ my_thread = nullptr;
+}
+
+void mthread::dispatch() {
+ //join any previous thread before deleting it; deleting a joinable thread calls std::terminate
+ if(my_thread) {
+  if(my_thread->joinable()) my_thread->join();
+  delete my_thread;
+ }
+ my_thread = new thread([this] {render();});
+}
+
+
+mthread::~mthread() {
+ if(my_thread) {
+  if(my_thread->joinable()) my_thread->join();
+  delete my_thread;
+ }
+}
+
+
+void mthread::join() {
+ if((my_thread) && (my_thread->joinable())) my_thread->join();
+}
+
+
+void mthread::render() {
+ uint32_t image_width = image.width();
+ unsigned int iter;
+ unsigned int worker, workers_finished;
+ vector<uint32_t> loads(worker_cnt); //std::vector instead of a variable-length array, which is not standard C++
+ double pixel_value;
+ complex<double> c, a;
+ struct mthread_status *peer_status;
+ struct mthread_divinfo divinfo;
+
+ unique_lock<mutex> ack;
+ unique_lock<mutex> syn_ack;
+
+
+ status.status_lock.lock();
+ status.searching = false;
+ status.share_finished = false;
+ status.div_syn = false;
+ status.div_error = false;
+ status.status_lock.unlock();
+
+
+ y_min = 0;
+ y_max = image.height();
+
+
+
+ for(;;) {
+ //thread is actively rendering
+ for(on_y = y_min; on_y < y_max; on_y++) {
+ progress++;
+ status.status_lock.lock();
+ status.row_load = y_max - on_y;
+ if(status.div_syn) {
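+ //a peer sent SYN asking to split our remaining rows: refuse with
+ //div_error if too few are left, otherwise ACK and sleep until the
+ //peer has taken the upper half of them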
+
+ status.div_error = status.row_load <= min_lines;
+
+ if(status.div_error) {
+ status.ack_lk.unlock();
+ status.msg_notify.notify_all();
+ }
+ else {
+ syn_ack = unique_lock<mutex>(status.syn_ack_lk);
+ status.ack_lk.unlock();
+ status.msg_notify.notify_all();
+ status.msg_notify.wait(syn_ack);
+ status.row_load = y_max - on_y;
+ syn_ack.unlock();
+ //new x/y min/max was adjusted by the other thread; we can continue as normal.
+ }
+ }
+ status.status_lock.unlock();
+
+ for(on_x = x_min; on_x < x_max; on_x++) {
+ c = complex<double>(c_min.real() + (on_x * step.real()), c_min.imag() + (on_y * step.imag()));
+ a = 0;
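+ //escape-time iteration: a = a*a + c until |a| reaches inf_cutoff or
+ //max_iter is exhausted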
+ for(iter = 0; iter < max_iter; iter++) {
+ if(abs(a) >= inf_cutoff) break;
+ a = a*a + c;
+ }
+ if(iter >= max_iter) {
+ iter = 0;
+ vmap[(on_y * image_width) + on_x] = 0;
+ }
+ else {
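+ //smooth coloring: offset the integer escape count by how far |a|
+ //overshot the cutoff, giving a continuous value for the histogram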
+ pixel_value = (iter + 1) - (log((log(pow(abs(a), 2.0)) / 2.0) / log(2.0)));
+ if(pixel_value < 0) pixel_value = 0; //a large overshoot on early iterations can push this negative; clamp to keep the histogram index valid
+ vmap[(on_y * image_width) + on_x] = pixel_value;
+ histogram[(int)pixel_value]++;
+ }
+ }
+ }
+
+
+ //thread is now searching for work
+
+ /** 2022 comment:
+ * this state should have been moved to a separate function to allow rendering methods to differ
+ * from inherited mthreads without needing to reimplement searching **/
+ status.status_lock.lock();
+ status.searching = true;
+ status.share_finished = true;
+ status.status_lock.unlock();
+
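+ //work-stealing search: snapshot each peer's remaining row count,
+ //skipping peers that are searching, mid-handshake, or nearly done,
+ //and count how many peers have finished their own shares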
+ while(status.searching) {
+ workers_finished = 0;
+ for(worker = 0; worker < worker_cnt; worker++) {
+ peer_status = &workers[worker]->status;
+ peer_status->status_lock.lock();
+
+ if((peer_status->share_finished) && (worker != id)) {
+ workers_finished++;
+ }
+ if((worker == id) ||
+ (peer_status->searching) || (peer_status->div_syn) || (peer_status->row_load < min_lines)) {
+ loads[worker] = 0;
+ peer_status->status_lock.unlock();
+ continue;
+ }
+ loads[worker] = peer_status->row_load;
+ peer_status->status_lock.unlock();
+ }
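+ //every other worker reports a finished share, so treat the image as
+ //complete and exit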
+ if(workers_finished >= worker_cnt - 1) {
+ return;
+ }
+ for(;;) {
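+ //try candidates from the largest load down until a split succeeds or
+ //no stealable rows remain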
+ worker = distance(loads.begin(), max_element(loads.begin(), loads.end()));
+ if(!loads[worker]) break;
+ peer_status = &workers[worker]->status;
+ peer_status->status_lock.lock();
+ if((peer_status->searching) || (peer_status->div_syn) || (peer_status->row_load < min_lines)) {
+ loads[worker] = 0;
+ peer_status->status_lock.unlock();
+ continue;
+ }
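+ //SYN: lock the peer's ack mutex, raise div_syn, and sleep until the
+ //peer acknowledges (note: the wait has no predicate, so this relies
+ //on there being no spurious wakeups)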
+ ack = unique_lock<mutex>(peer_status->ack_lk);
+ peer_status->div_syn = true;
+ peer_status->status_lock.unlock();
+ peer_status->msg_notify.wait(ack);
+ ack.unlock();
+ if(peer_status->div_error) {
+ loads[worker] = 0;
+ peer_status->status_lock.lock();
+ peer_status->div_error = false;
+ peer_status->div_syn = false;
+ peer_status->status_lock.unlock();
+ continue;
+ }
+
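+ //ACK received without error: take the upper half of the peer's rows,
+ //then SYN-ACK so the peer can resume rendering its lower half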
+ divinfo = workers[worker]->divide();
+ peer_status->syn_ack_lk.unlock();
+ peer_status->msg_notify.notify_all();
+ y_min = divinfo.y_min;
+ y_max = divinfo.y_max;
+ x_min = divinfo.x_min;
+ x_max = divinfo.x_max;
+ status.status_lock.lock();
+ status.searching = false;
+ status.status_lock.unlock();
+ break;
+ }
+ }
+ }
+}
+
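+//split the remaining rows: the calling thief takes the upper half,
+//this worker keeps rows [on_y, midpoint)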
+struct mthread_divinfo mthread::divide() {
+ struct mthread_divinfo ret;
+ ret.x_min = x_min;
+ ret.x_max = x_max;
+ ret.y_min = ((y_max - on_y) / 2) + on_y;
+ ret.y_max = y_max;
+ y_min = on_y;
+ y_max = ret.y_min;
+ status.div_syn = false;
+ return ret;
+}