author | Ben Bridle <bridle.benjamin@gmail.com> | 2024-09-07 18:21:24 +1200
committer | Ben Bridle <bridle.benjamin@gmail.com> | 2024-09-07 18:22:22 +1200
commit | e4fe2229314411f597e4d20dc64c6fda390e81f2 (patch)
tree | 28e1b73397cb72786dc5d67931d0469ca514764c /src/stack.rs
parent | 227df52d0bad2118ec7ae990f17157c1f79ffea9 (diff)
download | bedrock-core-e4fe2229314411f597e4d20dc64c6fda390e81f2.zip
Implement micro-optimisations for unary operators
A 1% speed increase was observed when testing a tight decrementing loop
with these optimisations.
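
For context, here is a sketch of the kind of tight decrementing loop the measurement refers to. The benchmark harness is not part of this commit, so the stripped-down `Stack` stand-in and the `push_u8`/`top_u8` helpers below are assumptions for illustration only:

```rust
use std::time::Instant;

// Stripped-down stand-in for the Stack in src/stack.rs (assumed layout:
// a 256-byte buffer indexed by a wrapping u8 stack pointer).
struct Stack {
    mem: [u8; 256],
    sp: u8,
}

impl Stack {
    // Hypothetical helper: push one byte and advance the stack pointer.
    fn push_u8(&mut self, value: u8) {
        self.mem[self.sp as usize] = value;
        self.sp = self.sp.wrapping_add(1);
    }

    // In-place decrement of the top byte, as added by this commit.
    fn dec_u8(&mut self) {
        let sp = self.sp.wrapping_sub(1) as usize;
        self.mem[sp] = self.mem[sp].wrapping_sub(1);
    }

    // Hypothetical helper: read the top byte without popping it.
    fn top_u8(&self) -> u8 {
        self.mem[self.sp.wrapping_sub(1) as usize]
    }
}

fn main() {
    let mut stack = Stack { mem: [0; 256], sp: 0 };
    stack.push_u8(0);
    let start = Instant::now();
    // Tight decrementing loop: repeatedly decrement the top of the stack.
    for _ in 0..100_000_000u64 {
        stack.dec_u8();
    }
    // Use the result so the loop is not optimised away entirely.
    println!("top = {}, elapsed = {:?}", stack.top_u8(), start.elapsed());
}
```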
Diffstat (limited to 'src/stack.rs')
-rw-r--r-- | src/stack.rs | 36
1 file changed, 36 insertions, 0 deletions
```diff
diff --git a/src/stack.rs b/src/stack.rs
index 14e1799..2862b72 100644
--- a/src/stack.rs
+++ b/src/stack.rs
@@ -50,3 +50,39 @@ impl Stack {
         self.sp = 0;
     }
 }
+
+use std::ops::Not;
+
+impl Stack {
+    pub fn inc_u8(&mut self) {
+        let sp = self.sp.wrapping_sub(1) as usize;
+        self.mem[sp] = self.mem[sp].wrapping_add(1);
+    }
+
+    pub fn dec_u8(&mut self) {
+        let sp = self.sp.wrapping_sub(1) as usize;
+        self.mem[sp] = self.mem[sp].wrapping_sub(1);
+    }
+
+    pub fn not_u8(&mut self) {
+        let sp = self.sp.wrapping_sub(1) as usize;
+        self.mem[sp] = self.mem[sp].not();
+    }
+
+    pub fn not_u16(&mut self) {
+        let sp = self.sp.wrapping_sub(1) as usize;
+        self.mem[sp] = self.mem[sp].not();
+        let sp = self.sp.wrapping_sub(2) as usize;
+        self.mem[sp] = self.mem[sp].not();
+    }
+
+    pub fn tal_u8(&mut self) {
+        let sp = self.sp.wrapping_sub(1) as usize;
+        self.mem[sp] = self.mem[sp].count_ones() as u8;
+    }
+
+    pub fn rev_u8(&mut self) {
+        let sp = self.sp.wrapping_sub(1) as usize;
+        self.mem[sp] = self.mem[sp].reverse_bits();
+    }
+}
```
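
Each of these methods rewrites its operand in place at `sp - 1` rather than popping it and pushing a result, so the stack pointer is only read, never written — presumably the source of the measured speedup. For comparison, here is a self-contained sketch of the generic pop/push route these methods bypass; `pop_u8` and `push_u8` are not shown in this hunk, so their definitions here are assumptions:

```rust
// Minimal stand-in for the Stack in src/stack.rs (assumed layout).
struct Stack {
    mem: [u8; 256],
    sp: u8,
}

impl Stack {
    // Hypothetical generic helpers: each call writes the stack pointer.
    fn pop_u8(&mut self) -> u8 {
        self.sp = self.sp.wrapping_sub(1); // sp written
        self.mem[self.sp as usize]
    }

    fn push_u8(&mut self, value: u8) {
        self.mem[self.sp as usize] = value;
        self.sp = self.sp.wrapping_add(1); // sp written again
    }

    // Generic route: two stack-pointer updates per unary operation.
    fn inc_u8_generic(&mut self) {
        let value = self.pop_u8();
        self.push_u8(value.wrapping_add(1));
    }

    // In-place route from this commit: sp is read once to form an index.
    fn inc_u8(&mut self) {
        let sp = self.sp.wrapping_sub(1) as usize;
        self.mem[sp] = self.mem[sp].wrapping_add(1);
    }
}

fn main() {
    let mut stack = Stack { mem: [0; 256], sp: 0 };
    stack.push_u8(41);
    stack.inc_u8_generic(); // 41 -> 42 via pop/push
    stack.inc_u8();         // 42 -> 43 in place
    assert_eq!(stack.pop_u8(), 43);
}
```

Note also that `not_u16` flips the two top bytes separately (at `sp - 1` and `sp - 2`): bitwise NOT of a 16-bit value is the same as NOT of each of its bytes, so the 16-bit operand never has to be assembled or split.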