feat: Output ordering!

As results come back over the dispatcher's return channel they are pushed
into a vector for reordering. The vector is kept sorted in reverse order so
that the next scanline to write always sits at the tail, where it can be
popped cheaply. After each scanline is received and buffered, a loop checks
the tail of the buffer: while the tail is the next-to-write element it is
popped and printed, and the loop stops as soon as that condition fails.
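The collector boils down to a buffer-and-drain loop. Sketched standalone below
for illustration (the Scanline struct, its fields, and the collect helper are
stand-ins, not the names in this commit, which works on
thread_utils::RenderResult with a custom Ord and sort_by rather than
sort_by_key):

use std::sync::mpsc::Receiver;

// Illustrative stand-in for the RenderResult produced by the render threads.
struct Scanline {
    line_num: i32,
    pixels: Vec<String>, // already-formatted PPM pixel lines
}

// Receive scanlines in arbitrary order and print them top-down, starting at
// the highest line number (scanlines count down from the image height).
fn collect(rx: Receiver<Scanline>, height: i32) {
    let mut buffer: Vec<Scanline> = Vec::new();
    let mut next_to_write = height - 1;

    while let Ok(scanline) = rx.recv() {
        buffer.push(scanline);
        // Keep the buffer ascending by line number so the next line to write
        // (the highest remaining) always sits at the tail.
        buffer.sort_by_key(|s| s.line_num);

        // Drain from the tail while it is the next-to-write line; anything
        // that arrived early stays buffered until its turn comes around.
        while buffer.last().map_or(false, |s| s.line_num == next_to_write) {
            let line = buffer.pop().unwrap();
            for px in &line.pixels {
                println!("{}", px);
            }
            next_to_write -= 1;
        }
    }
}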
src/main.rs | 55
src/main.rs
@@ -66,7 +66,7 @@ fn main() {
     };
 
     thread::scope(|s| {
-        let (mut dispatcher, mut scanline_receiver) = thread_utils::Dispatcher::new(&small_rng);
+        let (mut dispatcher, scanline_receiver) = thread_utils::Dispatcher::new(&small_rng);
 
         s.spawn(move || {
             for y in (0..image.1).rev() {
@@ -83,14 +83,52 @@ fn main() {
             dispatcher.submit_job(RenderCommand::Stop);
             // ... also I happen to know there are 4 threads.
         });
         eprintln!("Reached the scanline collector");
 
+        /*
+         * Store received results in the segments buffer.
+         * Some will land before their previous segments and will need to be held
+         * until the next-to-write arrives.
+         *
+         * Elements are sorted in reverse order so that they can be popped from the
+         * Vec quickly.
+         *
+         * The queue is scanned every single time a new item is received. In the
+         * happy path where the received item is next-up, it'll be buffered, checked
+         * and then printed. In the case where it isn't, it'll get buffered and
+         * stick around for more loops. When the next-to-write finally lands, it
+         * means the n+1 element is up, now. If that element is already in the buffer
+         * we want to write it out. Hence the loop that scans the whole buffer each
+         * receive.
+         *
+         * TODO: There could be an up-front conditional that checks to see if the
+         * received item *is* the next-to-write and skip the buffering step.
+         * But I need to make the concept work at all, first.
+         */
+        let mut raster_segments = Vec::<thread_utils::RenderResult>::new();
+        let mut sl_output_index = image.1-1; // scanlines count down, start at image height.
         while let Ok(scanline) = scanline_receiver.recv() {
-            //TODO: sort results once multiple threads are introduced.
             eprintln!("Received scanline: {}", scanline.line_num);
-            for color in scanline.line {
-                println!("{}", color.print_ppm(samples_per_pixel));
-            }
+            raster_segments.push(scanline);
+            raster_segments.sort_by( |a, b| b.cmp(a) );
+
+            loop {
+                if raster_segments.len() == 0 { break; } // can this ever happen? Not while every
+                                                         // single element gets pushed to the
+                                                         // buffer first. With the happy path
+                                                         // short-circuit noted above, it could.
+
+                let last_ind = raster_segments.len() - 1;
+                if raster_segments[last_ind].line_num == sl_output_index{
+                    let scanline = raster_segments.pop().unwrap();
+                    print_scanline(scanline, samples_per_pixel);
+                    sl_output_index -= 1;
+                } else {
+                    break;
+                }
+            }
         }
+        eprintln!("Size of raster_segments at finish: {}", raster_segments.len());
     });
@@ -98,6 +136,13 @@ fn main() {
     eprintln!("Done!");
 }
 
+fn print_scanline(scanline: thread_utils::RenderResult, samples_per_pixel: u32){
+    eprintln!("Printing scanline num: {}", scanline.line_num);
+    for color in &scanline.line {
+        println!("{}", color.print_ppm(samples_per_pixel));
+    }
+}
+
 #[derive (Clone)]
 pub struct RenderContext{
     image: (i32, i32),
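For the TODO in the collector comment (skip the buffering step when the
received scanline already is the next-to-write), one possible shape of the
loop is sketched below; this is a hypothetical variant for illustration, not
code from this commit:

        // Hypothetical happy-path short-circuit; not part of this commit.
        while let Ok(scanline) = scanline_receiver.recv() {
            if scanline.line_num == sl_output_index {
                // Next-to-write arrived: print it without touching the buffer.
                print_scanline(scanline, samples_per_pixel);
                sl_output_index -= 1;
            } else {
                raster_segments.push(scanline);
                raster_segments.sort_by(|a, b| b.cmp(a));
            }
            // A buffered line may now be next-to-write, so drain the tail as before.
            while raster_segments.last().map_or(false, |s| s.line_num == sl_output_index) {
                print_scanline(raster_segments.pop().unwrap(), samples_per_pixel);
                sl_output_index -= 1;
            }
        }

The empty-buffer case the inline comment worries about is covered here by
last() returning None on an empty Vec, which ends the drain loop.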
src/thread_utils.rs
@@ -3,6 +3,7 @@ use crate::RenderContext;
 use crate::Vec3;
 use crate::render_line;
 
+use core::cmp::Ordering;
 use std::thread;
 use std::sync::mpsc;
 use rand::rngs::SmallRng;
@@ -17,6 +18,32 @@ pub struct RenderResult {
     pub line: Vec<Vec3>,
 }
 
+impl Ord for RenderResult {
+    fn cmp(&self, other: &Self) -> Ordering {
+        if self.line_num > other.line_num {
+            Ordering::Less
+        } else if self.line_num < other.line_num {
+            Ordering::Greater
+        } else {
+            Ordering::Equal
+        }
+    }
+}
+
+impl PartialOrd for RenderResult {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl PartialEq for RenderResult {
+    fn eq(&self, other: &Self) -> bool {
+        self.line_num == other.line_num
+    }
+}
+
+impl Eq for RenderResult {}
+
 /*
  * The dispatcher will hold a list of threads, and a list of command input channels to match.
  * Helper functions exist to input jobs serially, and then dispatch them to an open thread.
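The reversed Ord above (a higher line_num compares as Less), combined with the
collector's sort_by(|a, b| b.cmp(a)), leaves the buffer ascending by line
number, so pop() hands back the highest remaining scanline first, which is
exactly the top-down output order. A tiny self-contained check of that
interaction, using a stand-in struct since the real RenderResult also carries
pixel data:

use std::cmp::Ordering;

// Stand-in with the same reversed ordering as RenderResult.
#[derive(Debug, PartialEq, Eq)]
struct Line { line_num: i32 }

impl Ord for Line {
    fn cmp(&self, other: &Self) -> Ordering {
        // Higher line numbers compare as Less, mirroring the impl above.
        other.line_num.cmp(&self.line_num)
    }
}

impl PartialOrd for Line {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

fn main() {
    let mut buf = vec![Line { line_num: 3 }, Line { line_num: 7 }, Line { line_num: 5 }];
    buf.sort_by(|a, b| b.cmp(a)); // same call as the collector loop
    assert_eq!(buf.pop().unwrap().line_num, 7); // highest line pops first
    assert_eq!(buf.pop().unwrap().line_num, 5);
    assert_eq!(buf.pop().unwrap().line_num, 3);
}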