diff --git a/src/bin/server.rs b/src/bin/server.rs
index 7830bce..ffb2155 100644
--- a/src/bin/server.rs
+++ b/src/bin/server.rs
@@ -1,31 +1,53 @@
 use std::{
-    io::{prelude::*, BufReader},
+    io::{prelude::*, BufReader, Lines},
     net::{TcpListener, TcpStream},
 };
-use std::io::Lines;
+use std::thread::available_parallelism;
 
+use crate::thread_pool::ThreadPool;
+
+mod thread_pool;
+
 fn main() {
+    // start server on localhost (for remote access use: 0.0.0.0)
     let listener = TcpListener::bind("127.0.0.1:7878").unwrap();
+
+    // create thread pool (size = no. of cpu cores)
+    let pool = ThreadPool::new(available_parallelism().unwrap().get());
+
     // handling function for incoming requests
     for stream in listener.incoming() {
         let stream = stream.unwrap();
 
-        handle_connection(stream);
+        // pass request to thread pool
+        pool.execute(|| {
+            handle_connection(stream);
+        });
     }
 
+    println!("Shutting down.");
 }
+
 fn handle_connection(mut stream: TcpStream) {
+    // clone TcpStream for simultaneous receiving and sending
     let mut stream2: TcpStream = stream.try_clone().unwrap();
+
+    // initialize reading buffer (for better performance!)
     let buf_reader: BufReader<&mut TcpStream> = BufReader::new(&mut stream);
 
+    // request: read received text line by line (continues until connection closed)
     let request: Lines<BufReader<&mut TcpStream>> = buf_reader.lines();
     println!("Request: {:#?}", request);
 
+    // process every text line received by request (terminates when connection closed)
     for elem in request {
+        // store received line in variable s
        let s = elem.unwrap();
         println!("{:?}", s);
-        stream2.write(s.as_bytes()).unwrap();
+
+        // send received line back to sender (append LF)
+        stream2.write_all(s.as_bytes()).unwrap();
         stream2.write_all(b"\n").unwrap();
     }
 }
diff --git a/src/bin/thread_pool.rs b/src/bin/thread_pool.rs
new file mode 100644
index 0000000..bb6f734
--- /dev/null
+++ b/src/bin/thread_pool.rs
@@ -0,0 +1,116 @@
+use std::{
+    sync::{mpsc, Arc, Mutex},
+    thread
+};
+
+
+pub struct ThreadPool {
+    workers: Vec<Worker>,
+    sender: Option<mpsc::Sender<Job>>,
+}
+
+// Box: pointer for heap allocation (smart pointer); dyn: concrete type not known at compile time
+type Job = Box<dyn FnOnce() + Send + 'static>;
+
+
+impl ThreadPool {
+    /// Create a new ThreadPool.
+    ///
+    /// The size is the number of threads in the pool.
+    ///
+    /// # Panics
+    ///
+    /// The `new` function will panic if the size is zero.
+    pub fn new(size: usize) -> ThreadPool {
+        assert!(size > 0);
+
+        // create asynchronous channel with sender/receiver pair (for jobs!)
+        let (sender, receiver) = mpsc::channel();
+
+        // protect receiver with thread-safe shared (Arc) mutex to avoid simultaneous access
+        let receiver = Arc::new(Mutex::new(receiver));
+
+        // pre(!)allocate space for workers
+        let mut workers = Vec::with_capacity(size);
+
+        // initialize all workers...
+        for id in 0..size {
+            // create worker with unique id and pointer to the shared mutex
+            workers.push(Worker::new(id, Arc::clone(&receiver)));
+        }
+
+        // set thread pool variables
+        ThreadPool {
+            workers,
+            // Some --> enum variant (Option): indicates that sender may not exist (= be None)
+            sender: Some(sender),
+        }
+    }
+
+    pub fn execute<F>(&self, f: F)
+    where
+        // FnOnce() --> only callable once; Send --> transferable to another thread; 'static --> lifetime
+        F: FnOnce() + Send + 'static,
+    {
+        let job = Box::new(f);
+
+        // commit job to sender object --> pass to threads for execution
+        self.sender.as_ref().unwrap().send(job).unwrap();
+    }
+}
+
+impl Drop for ThreadPool {
+    fn drop(&mut self) {
+        // drop sender first
+        drop(self.sender.take());
+
+        // then drop workers
+        for worker in &mut self.workers {
+            println!("Shutting down worker {}", worker.id);
+
+            // join worker thread
+            if let Some(thread) = worker.thread.take() {
+                thread.join().unwrap();
+            }
+        }
+    }
+}
+
+
+struct Worker {
+    id: usize,
+    thread: Option<thread::JoinHandle<()>>,
+}
+
+impl Worker {
+    /// Create a new Worker.
+    ///
+    /// The id is a unique identifier.
+    /// The receiver is a shared pointer to a receiver protected by a Mutex.
+    fn new(id: usize, receiver: Arc<Mutex<mpsc::Receiver<Job>>>) -> Worker {
+        // spawn thread and enter infinite loop
+        let thread = thread::spawn(move || loop {
+            // lock mutex and wait for job
+            let message = receiver.lock().unwrap().recv();
+
+            match message {
+                // normal operation --> execute job
+                Ok(job) => {
+                    println!("Worker {id} got a job; executing.");
+
+                    job();
+                }
+                // exit gracefully when pool is closed --> recv will return an error then
+                Err(_) => {
+                    println!("Worker {id} disconnected; shutting down.");
+                    break;
+                }
+            }
+        });
+
+        Worker {
+            id,
+            thread: Some(thread),
+        }
+    }
+}
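Note: for a quick manual test of this patch, the echo server can be driven with `nc 127.0.0.1 7878`, or with a throwaway Rust client along the lines of the sketch below. This client is not part of the patch; the loop count and message text are made up. It opens a few connections in a row, sends one LF-terminated line per connection, and prints the echoed reply, which also exercises more than one pool worker.

// throwaway test client (not part of this patch)
use std::{
    io::{prelude::*, BufReader},
    net::TcpStream,
};

fn main() -> std::io::Result<()> {
    // open a few connections in a row so several pool workers get a job
    for i in 0..4 {
        let mut stream = TcpStream::connect("127.0.0.1:7878")?;

        // send a single LF-terminated line (the server reads line by line)
        stream.write_all(format!("hello from connection {i}\n").as_bytes())?;

        // read the echoed line back
        let mut reply = String::new();
        BufReader::new(&stream).read_line(&mut reply)?;
        print!("echo: {reply}");
    }

    Ok(())
}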