author | Ben Bridle <ben@derelict.engineering> | 2025-09-19 13:17:14 +1200
committer | Ben Bridle <ben@derelict.engineering> | 2025-09-19 13:32:32 +1200
commit | bb1aa5958d1b67707dcf0f6b08bfaf0b408bd46e (patch)
tree | b26d07ed58aaf7a5230fc3e28c103d616abfa9b8 /arm9/source/devices/memory.c
parent | 9612c307f00c4313d73fe0c3a86c05c8d8cd514e (diff)
download | bedrock-nds-bb1aa5958d1b67707dcf0f6b08bfaf0b408bd46e.zip
Massive rewrite
This commit rewrites the emulator halfway from scratch to make it
easier to change and maintain in the future. The emulator core was
rewritten to adhere to the released Bedrock specification (earlier
versions implemented an older prototype specification, which is no
longer relevant).
This commit also adds proper support for running multiple concurrent
Bedrock instances. This was previously supported in a limited manner
for the on-screen keyboard, but now works for any regular program as
well, with switching being performed by pressing the L or R bumper
buttons. This is disabled by default, as programs will still need to
be baked into the emulator and hand-loaded.
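The input handling for instance switching isn't shown in this file, but on the DS it would typically be driven by libnds key polling. The sketch below is purely illustrative: `poll_instance_switch`, `current_instance`, and `instance_count` are hypothetical names, not code from this repository.

```c
#include <nds.h>

// Hypothetical sketch: the function and variable names here are illustrative,
// not identifiers from this commit.
static int current_instance = 0;

// Call once per frame, after scanKeys(), to cycle between Bedrock instances
// with the L and R bumpers.
void poll_instance_switch(int instance_count) {
	u32 pressed = keysDown();   // keys that went down since the last scanKeys()
	if (pressed & KEY_L)
		current_instance = (current_instance + instance_count - 1) % instance_count;
	if (pressed & KEY_R)
		current_instance = (current_instance + 1) % instance_count;
}
```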
Diffstat (limited to 'arm9/source/devices/memory.c')
-rw-r--r-- | arm9/source/devices/memory.c | 231
1 file changed, 123 insertions(+), 108 deletions(-)
diff --git a/arm9/source/devices/memory.c b/arm9/source/devices/memory.c
index 915aec2..c9e86d7 100644
--- a/arm9/source/devices/memory.c
+++ b/arm9/source/devices/memory.c
@@ -1,143 +1,158 @@
-#include "nds.h"
 #include "memory.h"
-static u8 heap[HEAP_SIZE][256]; // the page heap.
-static u8 unallocated[256]; // page to use for all unallocated reads and writes.
-static u8 heap_index[HEAP_SIZE];
-u8 mem_read1(MemoryDevice *mem) {
-	u8 output = mem->point1[mem->byte1++];
-	if (mem->byte1 == 0) {
-		mem->page1++;
-		mem_get_page1(mem);
-	}
-	return output;
+/*
+The 'heap' array is the global pool of memory from which pages are allocated
+and deallocated, used by every Bedrock instance. The 'heap_index' array tracks
+the owner of each page of memory: the value of each entry is the ID of the
+instance that owns / has allocated the corresponding page in the heap. An ID
+of zero indicates that the page is free / hasn't been allocated yet.
+
+If multiple instances are repeatedly allocating and deallocating pages, it's
+expected that the allocations will become very fragmented, with pages owned
+by each instance interleaving with one another. To find the heap address that
+corresponds with an allocated page address, the heap will need to be traversed,
+which makes random access very expensive. To mitigate this, the heap address
+of every page accessible from the current offset value (which determines the
+base page to address from) is cached in an array of 256 pointers, and the
+pointer corresponding with the current page is further cached as a pointer.
+
+The 'unallocated' array acts as a sort of null page, with every access of an
+unallocated page being routed instead to that page. This allows the read and
+write methods to omit a test to check whether a page is allocated or not;
+effectively, every page is always allocated. This is allowed because the
+Bedrock specification says that an unallocated read or write causes undefined
+behaviour, allowing us to optimise away the test.
+
+TODO: If one instance allocates memory, then a second instance allocates memory,
+then the first instance deallocates, then the second instance allocates some
+more, the pages allocated to the second instance will be out of order, but the
+code so far assumes that all pages are in the correct order (with gaps or not).
+There needs to be either a page reordering pass (time expensive) or an array to
+track the allocation order of pages for each instance (space expensive). The
+former approach would be the best, because it's unlikely that this would be a
+frequent occurrence. I'll need to shuffle pages around using memcpy and the null
+page. Only touch the pages of the current instance, to avoid having to recalculate
+the page caches of every instance. We can determine if pages will need to be
+shuffled / defragmented by checking if an unallocated page is traversed before
+the final allocated page is found.
+*/
+
+
+// Size of the heap in pages. The value was chosen by steadily increasing it
+// until the emulator refused to compile, and then easing back a bit.
+#define MAX_PAGES (12000)
+
+// The heap.
+static u8 heap[MAX_PAGES][256]; // the page heap.
+
+// Null page to use for unallocated reads and writes.
+static u8 unallocated[256];
+
+// Map heap pages to instances. An ID of zero represents an unallocated page.
+static u8 heap_index[MAX_PAGES];
+
+
+// Reset a read-write head on the memory device.
+void memory_head_reset(MemoryHead *head) {
+	head->offset = 0;
+	head->byte = 0;
+	head->point = (u8*) &unallocated;
+	head->page = 0;
+	for (int i=0; i<256; i++) head->cache[i] = (u8*) &unallocated;
 }
-u8 mem_read2(MemoryDevice *mem) {
-	u8 output = mem->point2[mem->byte2++];
-	if (mem->byte2 == 0) {
-		mem->page2++;
-		mem_get_page2(mem);
-	}
-	return output;
+// Reset the memory device.
+void memory_reset(MemoryDevice *mem) {
+	mem->count = 0;
+	mem->copy = 0;
+	memory_allocate(mem);
+	memory_head_reset(&mem->head1);
+	memory_head_reset(&mem->head2);
 }
-void mem_write1(MemoryDevice *mem, u8 value) {
-	mem->point1[mem->byte1++] = value;
-	if (mem->byte1 == 0) {
-		mem->page1++;
-		mem_get_page1(mem);
+// Read a byte from a memory head.
+u8 memory_read(MemoryHead *head) {
+	u8 output = head->point[head->byte++];
+	if (head->byte == 0) {
+		head->page++;
+		memory_refresh_page(head);
 	}
+	return output;
 }
-void mem_write2(MemoryDevice *mem, u8 value) {
-	mem->point2[mem->byte2++] = value;
-	if (mem->byte2 == 0) {
-		mem->page2++;
-		mem_get_page2(mem);
+// Write a byte to a memory head.
+void memory_write(MemoryHead *head, u8 value) {
+	head->point[head->byte++] = value;
+	if (head->byte == 0) {
+		head->page++;
+		memory_refresh_page(head);
 	}
 }
-
-void mem_load_cache1(MemoryDevice *mem) {
-	// Set all cached pointers to unallocated.
-	for (int i=0; i<256; i++) {
-		mem->cache1[i] = (u8*) &unallocated;
-	}
-	// Iterate over all heap pages, gather our ones.
-	int count = 0;
-	int cached = 0;
-	for (int i=0; i<HEAP_SIZE; i++) {
-		if (heap_index[i] == mem->id) {
-			if (count >= mem->offset1) {
-				mem->cache1[cached] = (u8*) &heap[i];
-				cached++;
-			}
-			count++;
-			if (cached == 256) break;
-		}
-	}
+void memory_refresh_page(MemoryHead *head) {
+	head->point = head->cache[head->page];
 }
-void mem_load_cache2(MemoryDevice *mem) {
-	// Set all cached pointers to unallocated.
-	for (int i=0; i<256; i++) {
-		mem->cache2[i] = (u8*) &unallocated;
-	}
-	// Iterate over all heap pages, gather our ones.
-	int count = 0;
-	int cached = 0;
-	for (int i=0; i<HEAP_SIZE; i++) {
-		if (heap_index[i] == mem->id) {
-			if (count >= mem->offset2) {
-				mem->cache2[cached] = (u8*) &heap[i];
-				cached++;
+// Recalculate the pointers stored in the page cache.
+void memory_refresh_cache(MemoryDevice *mem, MemoryHead *head) {
+	// Point every page in the cache to the unallocated page.
+	for (int i=0; i<256; i++) head->cache[i] = (u8*) &unallocated;
+	// Iterate over every page in the heap, gather all pages allocated to
+	// this instance that belong to the current offset.
+	int n = 0;
+	int cache_i = 0;
+	for (int heap_i=0; heap_i<MAX_PAGES; heap_i++) {
+		if (heap_index[heap_i] == mem->id) {
+			if (n++ >= head->offset) {
+				head->cache[cache_i++] = (u8*) &heap[heap_i];
+				if (cache_i == 256) break;
 			}
-			count++;
-			if (cached == 256) break;
 		}
 	}
 }
-// Fetch the page1 pointer from cache1.
-void mem_get_page1(MemoryDevice *mem) {
-	mem->point1 = mem->cache1[mem->page1];
-}
-
-// Fetch the page2 pointer from cache2.
-void mem_get_page2(MemoryDevice *mem) {
-	mem->point2 = mem->cache2[mem->page2];
-}
-
-void mem_allocate(MemoryDevice *mem) {
-	int count = 0;
-
-	// ALLOCATING
-	if (mem->count_write > mem->count) {
-		int to_allocate = mem->count_write - mem->count;
-		for (int i=0; i<HEAP_SIZE; i++) {
+// Allocate mem->count pages of memory.
+void memory_allocate(MemoryDevice *mem) {
+	// Track how many pages have been allocated/deallocated so far.
+	int n = 0;
+	// Allocate memory.
+	if (mem->count > mem->allocated) {
+		int to_allocate = mem->count - mem->allocated;
+		for (int i=0; i<MAX_PAGES; i++) {
 			if (heap_index[i] == 0) {
 				heap_index[i] = mem->id;
-				count++;
-				if (count == to_allocate) {
-					break;
-				}
+				if (++n == to_allocate) break;
 			}
 		}
-	// DEALLOCATING
-	} else if (mem->count_write < mem->count) {
-		for (int i=0; i<HEAP_SIZE; i++) {
+	// Deallocate memory.
+	} else if (mem->count < mem->allocated) {
+		for (int i=0; i<MAX_PAGES; i++) {
 			if (heap_index[i] == mem->id) {
-				count++;
-				if (count > mem->count_write) {
-					heap_index[i] = 0;
+				if (++n > mem->count) {
+					// Clear the page when it's deallocated.
 					memset(heap[i], 0, 256);
+					heap_index[i] = 0;
 				}
 			}
 		}
 	}
-
-	// Count the number of pages allocated to us.
-	mem->count = 0;
-	for (int i=0; i<HEAP_SIZE; i++) {
-		if (heap_index[i]==mem->id) mem->count++;
+	// Re-count the number of pages allocated to this instance.
+	mem->allocated = 0;
+	for (int i=0; i<MAX_PAGES; i++) {
+		if (heap_index[i]==mem->id) mem->allocated++;
 	}
-	// Reload the pointer caches for each head.
-	mem_load_cache1(mem);
-	mem_load_cache2(mem);
+	// Refresh the page caches for both heads.
+	memory_refresh_cache(mem, &mem->head1);
+	memory_refresh_cache(mem, &mem->head2);
 }
-
-void mem_do_copy(MemoryDevice *mem) {
-	int src = mem->offset2;
-	int dest = mem->offset1;
-	if (src == dest) return;
-	for (int i=0; i<mem->copy_write; i++) {
-		if (src>=mem->count || dest>=mem->count) {
-			return;
-		}
-		memcpy(&heap[dest++], &heap[src++], 256);
+// Copy mem->count pages from head 2 to head 1.
+void memory_copy(MemoryDevice *mem) {
+	if (mem->head1.offset == mem->head2.offset) return;
+	for (int n=0; n<mem->copy; n++) {
+		if (mem->head1.cache[n] == (u8*) &unallocated) return;
+		if (mem->head2.cache[n] == (u8*) &unallocated) return;
+		memcpy(mem->head1.cache[n], mem->head2.cache[n], 256);
 	}
 }
-
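The new code relies on `MemoryHead` and `MemoryDevice` types that are declared in `memory.h`, which isn't part of this diff. As a rough sketch, inferred only from the fields the functions above touch, the header might look something like the following; the struct and field names come from the diff, but the field widths, ordering, and include are guesses.

```c
// Speculative sketch of memory.h after this commit, inferred from memory.c.
#include <nds.h>          // assumed source of the u8/u16 fixed-width types

typedef struct {
	u16 offset;       // base page that the 256-entry cache is built from (width is a guess)
	u8  byte;         // position within the current page; u8 so the increment wraps at 256
	u8  page;         // index of the current page within the cache; also wraps at 256
	u8  *point;       // shortcut to the current page, i.e. cache[page]
	u8  *cache[256];  // heap address of every page reachable from 'offset'
} MemoryHead;

typedef struct {
	u8  id;           // instance ID recorded in heap_index for owned pages; 0 marks free pages
	u16 count;        // number of pages most recently requested by the program
	u16 allocated;    // number of pages this instance currently owns in the heap
	u16 copy;         // number of pages to copy from head 2 to head 1
	MemoryHead head1;
	MemoryHead head2;
} MemoryDevice;

void memory_head_reset(MemoryHead *head);
void memory_reset(MemoryDevice *mem);
u8   memory_read(MemoryHead *head);
void memory_write(MemoryHead *head, u8 value);
void memory_refresh_page(MemoryHead *head);
void memory_refresh_cache(MemoryDevice *mem, MemoryHead *head);
void memory_allocate(MemoryDevice *mem);
void memory_copy(MemoryDevice *mem);
```

Whatever the real header looks like, the code above does impose two constraints: `byte` and `page` must wrap at 256 for the end-of-page checks in `memory_read` and `memory_write` to fire, and `id` must be comparable with the `u8` entries stored in `heap_index`.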