path: root/src/devices/memory_device.rs
author     Ben Bridle <bridle.benjamin@gmail.com>  2025-07-03 15:26:07 +1200
committer  Ben Bridle <ben@derelict.engineering>   2025-07-03 21:24:07 +1200
commit     2accc78948fa4a18e37ab0bc405f9b2758acaa3e (patch)
tree       2551180ef7fb8f67bfc826de4ad3daf2dd24942e /src/devices/memory_device.rs
download   bedrock-pc-2accc78948fa4a18e37ab0bc405f9b2758acaa3e.zip
Initial commit
Diffstat (limited to 'src/devices/memory_device.rs')
-rw-r--r--  src/devices/memory_device.rs  186
1 file changed, 186 insertions, 0 deletions
diff --git a/src/devices/memory_device.rs b/src/devices/memory_device.rs
new file mode 100644
index 0000000..d116ca7
--- /dev/null
+++ b/src/devices/memory_device.rs
@@ -0,0 +1,186 @@
+use crate::*;
+
+use std::cmp::min;
+
+
+type Page = [u8; 256];
+
+
+pub struct MemoryDevice {
+    pub limit: u16,           // maximum allocatable number of pages
+    pub pages: Vec<Page>,     // all allocated pages
+    pub count_write: u16,     // number of pages requested by program
+    pub count: usize,         // number of pages allocated for use
+    pub copy_write: u16,      // number of pages to copy when port 0x9 is written
+    pub head_1: HeadAddress,  // access head controlled through ports 0x2-0x7
+    pub head_2: HeadAddress,  // access head controlled through ports 0xA-0xF
+}
+
+
+impl Device for MemoryDevice {
+    fn read(&mut self, port: u8) -> u8 {
+        match port {
+            0x0 => read_h!(self.count),
+            0x1 => read_l!(self.count),
+            0x2 => read_h!(self.head_1.page),
+            0x3 => read_l!(self.head_1.page),
+            0x4 => read_h!(self.head_1.address),
+            0x5 => read_l!(self.head_1.address),
+            0x6 => self.read_head_1(),  // either data port reads a byte at head 1
+            0x7 => self.read_head_1(),  // and advances the head address
+            0x8 => 0x00,  // the copy-length ports always read as zero
+            0x9 => 0x00,
+            0xA => read_h!(self.head_2.page),
+            0xB => read_l!(self.head_2.page),
+            0xC => read_h!(self.head_2.address),
+            0xD => read_l!(self.head_2.address),
+            0xE => self.read_head_2(),  // either data port reads a byte at head 2
+            0xF => self.read_head_2(),  // and advances the head address
+            _ => unreachable!(),
+        }
+    }
+
+    fn write(&mut self, port: u8, value: u8) -> Option<Signal> {
+        match port {
+            0x0 => write_h!(self.count_write, value),
+            0x1 => { write_l!(self.count_write, value); self.allocate(); },
+            0x2 => write_h!(self.head_1.page, value),
+            0x3 => write_l!(self.head_1.page, value),
+            0x4 => write_h!(self.head_1.address, value),
+            0x5 => write_l!(self.head_1.address, value),
+            0x6 => self.write_head_1(value),
+            0x7 => self.write_head_1(value),
+            0x8 => write_h!(self.copy_write, value),
+            0x9 => { write_l!(self.copy_write, value); self.copy(); },
+            0xA => write_h!(self.head_2.page, value),
+            0xB => write_l!(self.head_2.page, value),
+            0xC => write_h!(self.head_2.address, value),
+            0xD => write_l!(self.head_2.address, value),
+            0xE => self.write_head_2(value),
+            0xF => self.write_head_2(value),
+            _ => unreachable!(),
+        };
+        return None;
+    }
+
+    fn wake(&mut self) -> bool {
+        false
+    }
+
+    fn reset(&mut self) {
+        self.pages.clear();
+        self.count_write = 0;
+        self.count = 0;
+        self.copy_write = 0;
+        self.head_1.reset();
+        self.head_2.reset();
+    }
+}
+
+
+impl MemoryDevice {
+    pub fn new() -> Self {
+        Self {
+            limit: u16::MAX,
+            pages: Vec::new(),
+            count_write: 0,
+            count: 0,
+            copy_write: 0,
+            head_1: HeadAddress::new(),
+            head_2: HeadAddress::new(),
+        }
+    }
+
+    pub fn read_head_1(&mut self) -> u8 {
+        let (page_i, byte_i) = self.head_1.get_indices();
+        self.read_byte(page_i, byte_i)
+    }
+
+    pub fn read_head_2(&mut self) -> u8 {
+        let (page_i, byte_i) = self.head_2.get_indices();
+        self.read_byte(page_i, byte_i)
+    }
+
+    fn read_byte(&self, page_i: usize, byte_i: usize) -> u8 {
+        match self.pages.get(page_i) {
+            Some(page) => page[byte_i],
+            None => 0,  // unallocated pages read as zero
+        }
+    }
+
+    pub fn write_head_1(&mut self, value: u8) {
+        let (page_i, byte_i) = self.head_1.get_indices();
+        self.write_byte(page_i, byte_i, value);
+    }
+
+    pub fn write_head_2(&mut self, value: u8) {
+        let (page_i, byte_i) = self.head_2.get_indices();
+        self.write_byte(page_i, byte_i, value);
+    }
+
+    fn write_byte(&mut self, page_i: usize, byte_i: usize, value: u8) {
+        match self.pages.get_mut(page_i) {
+            Some(page) => page[byte_i] = value,
+            None => if page_i < self.count {  // allocate the page on first write
+                self.pages.resize(page_i + 1, [0; 256]);
+                self.pages[page_i][byte_i] = value;
+            }
+        }
+    }
+
+    pub fn allocate(&mut self) {
+        self.count = min(self.count_write, self.limit) as usize;
+        // Defer allocation of new pages, but free any pages beyond the new count.
+        self.pages.truncate(self.count);
+    }
+
+    pub fn copy(&mut self) {
+        let src = self.head_2.page as usize;
+        let dest = self.head_1.page as usize;
+        let n = self.copy_write as usize;
+
+        // Pre-allocate destination pages as needed.
+        let allocate = min(dest + n, self.count);
+        if allocate > self.pages.len() {
+            self.pages.resize(allocate, [0; 256]);
+        }
+
+        for i in 0..n {
+            let src_page = match self.pages.get(src + i) {
+                Some(src_page) => src_page.to_owned(),
+                None => [0; 256],  // unallocated source pages copy as zeroes
+            };
+            match self.pages.get_mut(dest + i) {
+                Some(dest) => *dest = src_page,
+                None => break,  // stop past the requested page count
+            };
+        }
+    }
+}
+
+
+pub struct HeadAddress {
+    pub page: u16,
+    pub address: u16,
+}
+
+impl HeadAddress {
+    pub fn new() -> Self {
+        Self {
+            page: 0,
+            address: 0,
+        }
+    }
+
+    pub fn reset(&mut self) {
+        self.page = 0;
+        self.address = 0;
+    }
+
+    pub fn get_indices(&mut self) -> (usize, usize) {
+        let page_i = (self.page + (self.address / 256)) as usize;  // addresses spill into later pages
+        let byte_i = (self.address % 256) as usize;
+        self.address = self.address.wrapping_add(1);  // each access advances the head
+        (page_i, byte_i)
+    }
+}
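
A possible usage sketch (not part of this commit): the routine below drives the
memory device purely through its port interface, on the assumption that the
crate's Device trait is in scope and that the write_h!/write_l! macros set the
high and low bytes of their u16 target. The helper function `out` is
hypothetical and only exists to discard the Option<Signal> result of write().

    fn out(mem: &mut MemoryDevice, port: u8, value: u8) {
        // Write a single byte to a device port, ignoring the returned signal.
        let _ = mem.write(port, value);
    }

    fn exercise_memory_device() {
        let mut mem = MemoryDevice::new();

        // Request two pages: high byte to port 0x0, low byte to port 0x1.
        // Writing the low byte triggers allocate(); actual page allocation
        // is deferred until a page is first written.
        out(&mut mem, 0x0, 0x00);
        out(&mut mem, 0x1, 0x02);

        // Point head 1 at page 0, address 0, then write a byte through data
        // port 0x6. Each data-port access advances the head address by one.
        out(&mut mem, 0x2, 0x00); out(&mut mem, 0x3, 0x00);
        out(&mut mem, 0x4, 0x00); out(&mut mem, 0x5, 0x00);
        out(&mut mem, 0x6, 0xAB);

        // Rewind the head address and read the byte back.
        out(&mut mem, 0x4, 0x00); out(&mut mem, 0x5, 0x00);
        assert_eq!(mem.read(0x6), 0xAB);

        // Copy one page from head 2's page (0) to head 1's page (1):
        // set both page registers, then write the length to ports 0x8/0x9.
        out(&mut mem, 0x2, 0x00); out(&mut mem, 0x3, 0x01);
        out(&mut mem, 0xA, 0x00); out(&mut mem, 0xB, 0x00);
        out(&mut mem, 0x8, 0x00); out(&mut mem, 0x9, 0x01);

        // The copied byte is now readable through head 2 at page 1, address 0.
        out(&mut mem, 0xA, 0x00); out(&mut mem, 0xB, 0x01);
        out(&mut mem, 0xC, 0x00); out(&mut mem, 0xD, 0x00);
        assert_eq!(mem.read(0xE), 0xAB);
    }

Deferring page allocation to allocate() and write_byte() means a program can
request a large bank of pages up front without the host paying for the memory
until those pages are actually written.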