First commit, Vystem v0.1

This commit is contained in:
2026-03-31 22:15:00 +02:00
commit e15daed8c0
462 changed files with 134655 additions and 0 deletions

View File

@@ -0,0 +1,34 @@
// SPDX-License-Identifier: MPL-2.0
#ifndef SH_LIB_HEAP_H
#define SH_LIB_HEAP_H
#include "memory/page.h"
#include "memory/pba.h"
#include "memory/pez/pez.h"
#include "memory/pez/radix.h"
#include "memory/slabs/slab_generic.h"
#include "std/status.h"
#include "std/type.h"
// Kernel heap structure.
// Aggregates every allocator that serves kernel allocations: the Pez physical and
// virtual planes, the kernel page table pool, 8 generic slab size classes with
// their backing page block allocators, and a radix tree keyed on allocations.
typedef struct {
sh_pez_PHYSICAL_PLANE *phys_plane; // physical page provider (Pez physical backend)
sh_pez_VIRTUAL_PLANE *virt_plane; // virtual address space provider
sh_page_PAGE_TABLE_POOL *kernel_ptp; // page table pool used to create mappings
struct sh_slab_generic_SLAB_ALLOCATOR slabs_allocator[8]; // one per SH_SLAB_GENERIC_OBJECT_SIZE_BYTES class (8..1024 bytes)
sh_pba_PAGE_BLOCK_ALLOCATOR pba[8]; // page block allocator backing each slab class
sh_radix_TREE alloc_size_tree; // NOTE(review): presumably maps an allocation's VA to its size/class so free can route it — confirm
} sh_heap_KERNEL_HEAP;
// Return default kernel heap
sh_heap_KERNEL_HEAP *sh_heap_get_default_heap();
// Load default kernel heap
void sh_heap_load_default_heap(sh_heap_KERNEL_HEAP *heap);
// Initialize heap structure; the calling entity must manually set up all slab allocators.
SH_STATUS sh_heap_init_heap(sh_pez_PHYSICAL_PLANE *phys_plane,sh_pez_VIRTUAL_PLANE *virt_plane,sh_page_PAGE_TABLE_POOL *kernel_ptp,sh_heap_KERNEL_HEAP *kernel_heap);
// Allocate a certain amount of pages from Pez physical backend
SH_STATUS sh_heap_allocate_pages(sh_uint32 pages_count,sh_page_VIRTUAL_ADDRESS *address);
// Free an allocated region of pages from the heap
SH_STATUS sh_heap_free_pages(sh_page_VIRTUAL_ADDRESS va);
// Allocate an object based on its size
SH_STATUS sh_heap_allocate_object(sh_uint32 size_bytes,sh_page_VIRTUAL_ADDRESS *address);
// Free an object based on its size
SH_STATUS sh_heap_free_object(sh_page_VIRTUAL_ADDRESS va);
#endif

View File

@@ -0,0 +1,159 @@
// SPDX-License-Identifier: MPL-2.0
#ifndef SH_LIB_PAGE_H
#define SH_LIB_PAGE_H
#include "std/type.h"
#include "std/status.h"
#include "memory/vmem_layout.h"
#define SH_PAGE_KERNEL_PERM_VA_BASE SH_VMEM_LAYOUT_KERNEL_ALLOC_SPACE_VA
#define SH_PAGE_KERNEL_PERM_VA_END SH_VMEM_LAYOUT_KERNEL_ALLOC_SPACE_VA_END
#define SH_PAGE_MEMORY_MAP_VA SH_VMEM_LAYOUT_MEMORY_MAP_VA
#define SH_PAGE_SIZE 4096
#define SH_PAGE_MAX_MEM_COUNT 16ULL*1024*1024*1024*1024
#define SH_PAGE_MAX_PAGES_COUNT (SH_PAGE_MAX_MEM_COUNT/SH_PAGE_SIZE)
#define SH_PAGE_PTP_ALLOCATOR_PAGES_COUNT 4096
#define SH_PAGE_PTP_ALLOCATOR_BITMAP_UINT64 SH_PAGE_PTP_ALLOCATOR_PAGES_COUNT/64
typedef sh_uint32 sh_page_MEMORY_TYPE;
#define SH_PAGE_RESERVED_MEMORY_TYPE (sh_page_MEMORY_TYPE)0
#define SH_PAGE_LOADER_CODE (sh_page_MEMORY_TYPE)1
#define SH_PAGE_LOADER_DATA (sh_page_MEMORY_TYPE)2
#define SH_PAGE_BOOT_SERVICES_CODE (sh_page_MEMORY_TYPE)3
#define SH_PAGE_BOOT_SERVICES_DATA (sh_page_MEMORY_TYPE)4
#define SH_PAGE_RUNTIME_SERVICES_CODE (sh_page_MEMORY_TYPE)5
#define SH_PAGE_RUNTIME_SERVICES_DATA (sh_page_MEMORY_TYPE)6
#define SH_PAGE_CONVENTIONAL_MEMORY (sh_page_MEMORY_TYPE)7
#define SH_PAGE_UNUSABLE_MEMORY (sh_page_MEMORY_TYPE)8
#define SH_PAGE_ACPI_RECLAIM_MEMORY (sh_page_MEMORY_TYPE)9
#define SH_PAGE_ACPI_MEMORY_NVS (sh_page_MEMORY_TYPE)10
#define SH_PAGE_MEMORY_MAPPED_IO (sh_page_MEMORY_TYPE)11
#define SH_PAGE_MEMORY_MAPPED_IO_PORT_SPACE (sh_page_MEMORY_TYPE)12
#define SH_PAGE_PAL_CODE (sh_page_MEMORY_TYPE)13
#define SH_PAGE_PERSISTENT_MEMORY (sh_page_MEMORY_TYPE)14
#define SH_PAGE_RESERVED (sh_page_MEMORY_TYPE)15
typedef sh_uint64 sh_page_PHYSICAL_ADDRESS;
typedef sh_uint64 sh_page_VIRTUAL_ADDRESS;
#define SH_PAGE_NULL_PA (sh_page_PHYSICAL_ADDRESS)0
#define SH_PAGE_NULL_VA (sh_page_VIRTUAL_ADDRESS)0
#define SH_PAGE_PRESENT (1ULL<<0)
#define SH_PAGE_TABLE_FLAGS (1ULL<<1)
#define SH_PAGE_RW (1ULL<<1)
#define SH_PAGE_US (1ULL<<2)
#define SH_PAGE_PWT (1ULL<<3)
#define SH_PAGE_PCD (1ULL<<4)
#define SH_PAGE_ACCESSED (1ULL<<5)
#define SH_PAGE_DIRTY (1ULL<<6)
#define SH_PAGE_PS (1ULL<<7)
#define SH_PAGE_GLOBAL (1ULL<<8)
#define SH_PAGE_NX (1ULL<<63)
// Memory map entry structure.
// NOTE(review): the field layout and the sh_page_MEMORY_TYPE values above look like
// they mirror the UEFI memory descriptor / EFI_MEMORY_TYPE — confirm against the bootloader.
#pragma pack(1)
typedef struct {
sh_uint32 type; // one of the sh_page_MEMORY_TYPE values defined above
sh_uint64 physical_start; // physical base address of the range
sh_uint64 pages_count; // length of the range, in SH_PAGE_SIZE pages
sh_uint64 attributes;
} sh_page_MEMORY_MAP_ENTRY;
#pragma pack()
// Memory map header structure.
// Prefixes the copied memory map buffer; validated by sh_page_check_memory_map.
#pragma pack(1)
typedef struct {
sh_uint8 sig_start[8]; // signature bytes checked by sh_page_check_memory_map
sh_uint64 entry_count; // number of sh_page_MEMORY_MAP_ENTRY records that follow
sh_uint64 entry_size; // size in bytes of one entry record
sh_uint8 mmap_syntax_version; // memory map format version
} sh_page_MEMORY_MAP_HEADER;
#pragma pack()
// Page table pool structure.
// Pool of physically contiguous pages used to allocate page tables; allocation
// state is tracked with one bit per page in ptp_alloc_bitmap.
#pragma pack(1)
typedef struct {
sh_page_PHYSICAL_ADDRESS page_table_pa; // physical base of the pool
sh_page_VIRTUAL_ADDRESS page_table_va; // virtual base where the pool is mapped
sh_uint64 ptp_alloc_bitmap[SH_PAGE_PTP_ALLOCATOR_BITMAP_UINT64]; // 1 bit per pool page
sh_uint64 ptp_pages_count; // pages actually managed by this pool
sh_uint64 ptp_alloc_bitmap_uint64_count; // uint64 words of ptp_alloc_bitmap in use
} sh_page_PAGE_TABLE_POOL;
#pragma pack()
// Memory statistics structure. Filled by sh_page_get_memory_stats.
typedef struct {
sh_uint64 memory_total_pages; // memory_total is the size of the addressable physical space
sh_uint64 memory_total_bytes;
sh_uint64 memory_installed_pages; // memory_installed is the sum of the size of all free regions at kernel boot
sh_uint64 memory_installed_bytes;
sh_uint64 free_pages; // physical pages currently free
sh_uint64 used_pages; // physical pages currently allocated
double free_ratio; // NOTE(review): presumably free_pages over a total — confirm which denominator
double used_ratio; // NOTE(review): presumably used_pages over the same total — confirm
sh_uint64 largest_free_block; // largest contiguous free run, in pages
sh_uint64 largest_used_block; // largest contiguous used run, in pages
sh_uint64 free_blocks_count; // number of contiguous free runs
sh_uint64 used_blocks_count; // number of contiguous used runs
sh_uint64 physical_bitmap_size_pages; // size of the physical page bitmap, in pages
sh_uint64 physical_bitmap_size_bytes; // size of the physical page bitmap, in bytes
} sh_page_MEM_STATS;
// Load boot PTP VA. Intended for one usage only.
SH_STATUS sh_page_load_boot_ptp_va(sh_page_VIRTUAL_ADDRESS pt_pool_va);
// Return boot PTP VA.
sh_page_VIRTUAL_ADDRESS sh_page_get_boot_ptp_va();
// Copy the memory map provided by the bootloader into a dedicated buffer. Intended for one usage only.
SH_STATUS sh_page_copy_memory_map();
// Check for memory map signatures and read memory map header.
SH_STATUS sh_page_check_memory_map();
// Return the amount of physical memory in pages
sh_uint64 sh_page_get_physical_memory_amount_pages();
// Return the amount of physical memory in bytes
sh_uint64 sh_page_get_physical_memory_amount_bytes();
// Return the first available physical page in physical bitmap.
sh_uint64 sh_page_get_one_page_na();
// Set pages ranges into provided bitmap.
SH_STATUS sh_page_set_pages_range_bitmap(sh_uint8 *bitmap,sh_uint64 page_count_in_bitmap,sh_uint64 page_index,sh_uint64 page_count,sh_bool state);
// Return the allocation status of a page inside the provided bitmap.
// bitmap: packed bitmap with one bit per page (set bit = allocated). The bitmap is
// never written here, so the parameter is const-qualified (callers passing a
// non-const pointer are unaffected).
// page_index: page index into the bitmap; caller must ensure it is within range,
// no bounds check is performed.
// Returns nonzero (true) when the page's bit is set, zero otherwise.
static inline sh_bool sh_page_is_allocated(const sh_uint8 *bitmap,sh_uint64 page_index) {
sh_uint64 byte_index=page_index/8; // byte that holds this page's bit
sh_uint8 bit_index=page_index%8; // bit position within that byte
return (bitmap[byte_index] & (1u<<bit_index))!=0;
}
// Initialize PTP structure.
SH_STATUS sh_page_init_ptp(sh_page_PHYSICAL_ADDRESS ptp_pa,sh_page_VIRTUAL_ADDRESS ptp_va,sh_uint64 initial_fill_level,sh_page_PAGE_TABLE_POOL *page_table_pool);
// Dump provided PTP bitmap, intended for debug.
SH_STATUS sh_page_dump_ptp_bitmap(sh_page_PAGE_TABLE_POOL *ptp);
// Allocate one page from a PTP internal bitmap.
sh_page_PHYSICAL_ADDRESS sh_page_ptp_alloc_one_page(sh_page_PAGE_TABLE_POOL *pt_pool);
// Convert a PA provided by any layer of the PTP into a valid VA. Return 0 if PA isn't in expected range.
// The pool's pages are physically contiguous starting at page_table_pa and mapped
// contiguously at page_table_va, so the translation is a simple offset.
static inline sh_uint64 *sh_page_ptp_pa_to_va(sh_page_PAGE_TABLE_POOL *ptp,sh_uint64 pa) {
sh_uint64 base=ptp->page_table_pa;
sh_uint64 size=ptp->ptp_pages_count*SH_PAGE_SIZE; // was a magic 4096; use the header's page size constant
if (pa<base || pa>=base+size) return 0;
return (sh_uint64 *)(ptp->page_table_va+(pa-base));
}
// Map one physical page to VA to provided PTP.
SH_STATUS sh_page_map_one_page_ptp(sh_page_PAGE_TABLE_POOL *ptp,sh_page_VIRTUAL_ADDRESS va,sh_page_PHYSICAL_ADDRESS pa,sh_uint64 flags);
// Return according SH_STATUS if provided VA is mapped inside provided PTP.
SH_STATUS sh_page_is_va_mapped_ptp(sh_page_PAGE_TABLE_POOL *ptp,sh_page_VIRTUAL_ADDRESS va);
// Return according SH_STATUS if provided VA is mapped inside provided PTP for all the range provided
SH_STATUS sh_page_is_va_range_mapped_ptp(sh_page_PAGE_TABLE_POOL *ptp,sh_page_VIRTUAL_ADDRESS va,sh_uint64 size_bytes);
// Search for an available amount of virtual memory inside provided range with the provided size
SH_STATUS sh_page_search_available_va_range(sh_page_PAGE_TABLE_POOL *ptp,sh_page_VIRTUAL_ADDRESS range_base,sh_page_VIRTUAL_ADDRESS range_size_bytes,sh_uint64 size_bytes,sh_page_VIRTUAL_ADDRESS *address_found);
// Map a range of pages from PA to VA. Both the virtual and physical areas have to be contiguous. VA availability is checked, not physical page availability.
SH_STATUS sh_page_map_contiguous_pages_range_ptp(sh_page_PAGE_TABLE_POOL *ptp,sh_page_VIRTUAL_ADDRESS va,sh_page_PHYSICAL_ADDRESS pa,sh_uint64 flags,sh_uint64 size_bytes);
// Search for an available amount of pages inside physical bitmap
SH_STATUS sh_page_search_physical_contiguous_block_na(sh_uint64 pages_needed,sh_page_PHYSICAL_ADDRESS *pa);
// Allocate the corresponding amount of pages to size_bytes.
SH_STATUS sh_page_alloc_contiguous(sh_page_PAGE_TABLE_POOL *ptp,sh_uint64 size_bytes,sh_page_VIRTUAL_ADDRESS* va);
// Allocate the corresponding amount of pages to size_bytes. Provide support for custom flags and VA range search.
SH_STATUS sh_page_alloc_contiguous_extended(sh_page_PAGE_TABLE_POOL *ptp,sh_uint64 size_bytes,sh_page_VIRTUAL_ADDRESS* va,DEFAULT sh_uint64 flags,DEFAULT sh_page_VIRTUAL_ADDRESS va_range_start,DEFAULT sh_uint64 va_range_size_bytes);
// Unmap one page from a PTP, assuming the VA is mapped. Does not deallocate the associated physical page
SH_STATUS sh_page_unmap_one_page_ptp(sh_page_PAGE_TABLE_POOL *ptp,sh_page_VIRTUAL_ADDRESS va);
// Unmap a range of pages from VA. Both the virtual and physical areas have to be contiguous. VA mapping is checked, not physical page occupation.
SH_STATUS sh_page_unmap_contiguous_pages_range_ptp(sh_page_PAGE_TABLE_POOL *ptp,sh_page_VIRTUAL_ADDRESS va,sh_uint64 size_bytes);
// Convert a VA allocated in a PTP into his equivalent PA by searching inside the PTP
SH_STATUS sh_page_ptp_va_to_pa(sh_page_PAGE_TABLE_POOL *ptp,sh_page_VIRTUAL_ADDRESS va,sh_page_PHYSICAL_ADDRESS *pa);
// Unalloc one page from a VA. Checks whether the VA is mapped. The PA is calculated through searching in the PTP
SH_STATUS sh_page_unalloc_one_page(sh_page_PAGE_TABLE_POOL *ptp,sh_page_VIRTUAL_ADDRESS va);
// Unalloc a range of virtually contiguous pages. Check if the entire range is allocated before unallocating anything.
SH_STATUS sh_page_unalloc_contiguous(sh_page_PAGE_TABLE_POOL *ptp,sh_page_VIRTUAL_ADDRESS va,sh_uint64 size_bytes);
// Parse memory map, set all non usable pages into physical bitmap and compute available amount of physical memory. Intended for one usage only.
SH_STATUS sh_page_analyse_memory_map(sh_page_PAGE_TABLE_POOL *ptp);
// Return physical bitmap pointer
sh_page_VIRTUAL_ADDRESS sh_page_get_physical_bitmap_ptr();
// Get physical memory statistics
SH_STATUS sh_page_get_memory_stats(sh_page_MEM_STATS *mem_stats);
#endif

View File

@@ -0,0 +1,20 @@
// SPDX-License-Identifier: MPL-2.0
#ifndef SH_LIB_PBA_H
#define SH_LIB_PBA_H
#include "std/type.h"
#include "std/status.h"
#include "memory/page.h"
#include "memory/pez/pez.h"
// Page block allocator.
// Hands out fixed-size blocks of pages from a reserved virtual window.
// NOTE(review): only init and alloc are declared — appears to be bump-style with no
// per-block free; confirm before relying on reclamation.
typedef struct {
sh_page_VIRTUAL_ADDRESS start_va; // base VA of the managed window
sh_uint64 total_pages; // size of the window, in pages
sh_uint64 block_pages; // pages per block
sh_uint64 block_count; // NOTE(review): presumably blocks handed out so far — confirm
sh_uint64 max_blocks; // block capacity of the window
} sh_pba_PAGE_BLOCK_ALLOCATOR;
// Initialize a page block allocator
SH_STATUS sh_pba_init(sh_pba_PAGE_BLOCK_ALLOCATOR *pba,sh_page_VIRTUAL_ADDRESS start_va,sh_uint64 area_pages_amount,sh_uint64 block_pages);
// Allocate a block and return corresponding pointer
SH_STATUS sh_pba_alloc(sh_pba_PAGE_BLOCK_ALLOCATOR *pba,sh_page_PAGE_TABLE_POOL *ptp,sh_page_VIRTUAL_ADDRESS *ptr);
#endif

View File

@@ -0,0 +1,71 @@
// SPDX-License-Identifier: MPL-2.0
#ifndef SH_LIB_PEZ_H
#define SH_LIB_PEZ_H
#include "std/type.h"
#include "std/status.h"
#include "memory/slabs/slab_reg_phys.h"
#include "memory/slabs/slab_reg_virt.h"
#include "memory/pez/radix.h"
#define SH_PEZ_REGION_OBJECT_INDEX_SIZE_BYTES 3
// Physical region object
#pragma pack(1)
typedef struct {
sh_uint32 start_page_index;
sh_uint32 region_size_pages;
sh_uint8 next_region_index[3];
sh_uint8 flags;
} sh_pez_REGION_PHYSICAL_OBJECT;
#pragma pack()
// Virtual region object.
#pragma pack(1)
typedef struct {
sh_uint32 start_page_index; // first virtual page of the region — NOTE(review): presumably relative to the plane's plane_offset; confirm
sh_uint32 region_size_pages; // region length, in pages
sh_uint32 next_region_index; // slab reference of the next region object; 0 (SH_SLAB_REG_VIRT_NULL_REF) = none
} sh_pez_REGION_VIRTUAL_OBJECT;
#pragma pack()
// Physical plane structure.
// Pez's view of physical memory: regions are tracked by two radix trees, with
// region objects and tree nodes coming from dedicated slab allocators.
typedef struct {
sh_uint32 free_pages; // pages currently free in this plane
sh_uint32 used_pages; // pages currently allocated from this plane
sh_radix_TREE region_radix_tree; // NOTE(review): appears to index free regions by size (cf. size-list debug payloads) — confirm
sh_radix_TREE boundary_radix_tree; // NOTE(review): appears to index region boundaries for coalescing on free — confirm
sh_slab_reg_phys_SLAB_ALLOCATOR *slab_reg_phys; // allocator for sh_pez_REGION_PHYSICAL_OBJECT
struct sh_slab_radix_node_SLAB_ALLOCATOR *slab_radix_node; // allocator for radix tree nodes
sh_page_PAGE_TABLE_POOL *kernel_ptp; // PTP used when trees/slabs need new pages
sh_uint8 *physical_bitmap; // 1-bit-per-page physical occupancy bitmap
sh_uint64 physical_page_count; // pages covered by physical_bitmap
} sh_pez_PHYSICAL_PLANE;
// Virtual plane structure.
// Pez's view of a virtual address range starting at plane_offset.
typedef struct {
sh_uint32 free_pages; // virtual pages currently free in this plane
sh_uint32 used_pages; // virtual pages currently allocated
sh_radix_TREE region_radix_tree; // free-region index (same scheme as the physical plane)
sh_radix_TREE boundary_radix_tree; // boundary index used when freeing
sh_slab_reg_virt_SLAB_ALLOCATOR *slab_reg_virt; // allocator for sh_pez_REGION_VIRTUAL_OBJECT
struct sh_slab_radix_node_SLAB_ALLOCATOR *slab_radix_node; // allocator for radix tree nodes
sh_page_PAGE_TABLE_POOL *kernel_ptp; // PTP used for the plane's own metadata
sh_page_PAGE_TABLE_POOL *reference_ptp; // NOTE(review): presumably the PTP whose address space this plane carves up — confirm
sh_page_VIRTUAL_ADDRESS plane_offset; // VA base of the range this plane manages
} sh_pez_VIRTUAL_PLANE;
// Set Pez state to true
void sh_pez_set_available();
// Return current state of Pez
sh_bool sh_pez_is_available();
// Return reference Pez physical plane
sh_pez_PHYSICAL_PLANE* sh_pez_get_reference_phys_plane();
// Initialize a physical plane
SH_STATUS sh_pez_init_physical_plane(sh_uint8 *physical_bitmap,sh_uint64 physical_page_count,sh_slab_reg_phys_SLAB_ALLOCATOR *slab_reg_phys,struct sh_slab_radix_node_SLAB_ALLOCATOR *slab_radix_node,sh_page_PAGE_TABLE_POOL *kernel_ptp,sh_pez_PHYSICAL_PLANE *phys_plane);
// Allocate physical pages from the physical plane
SH_STATUS sh_pez_alloc_physical_pages(sh_pez_PHYSICAL_PLANE *phys_plane,sh_uint32 pages_count,sh_page_PHYSICAL_ADDRESS *address);
// Free physical pages from the physical plane
SH_STATUS sh_pez_free_physical_pages(sh_pez_PHYSICAL_PLANE *phys_plane,sh_page_PHYSICAL_ADDRESS *address,sh_uint32 pages_count);
// Debug Pez Physical
SH_STATUS sh_pez_debug_physical(sh_pez_PHYSICAL_PLANE *phys_plane);
// Initialize a virtual plane
SH_STATUS sh_pez_init_virtual_plane(sh_page_VIRTUAL_ADDRESS plane_offset,sh_slab_reg_virt_SLAB_ALLOCATOR *slab_reg_virt,struct sh_slab_radix_node_SLAB_ALLOCATOR *slab_radix_node,sh_page_PAGE_TABLE_POOL *kernel_ptp,sh_page_PAGE_TABLE_POOL *reference_ptp,sh_pez_VIRTUAL_PLANE *virt_plane);
// Allocate virtual space from the virtual plane
SH_STATUS sh_pez_alloc_virtual_pages(sh_pez_VIRTUAL_PLANE *virt_plane,sh_uint32 pages_count,sh_page_VIRTUAL_ADDRESS *address);
// Free virtual space from the virtual plane
SH_STATUS sh_pez_free_virtual_pages(sh_pez_VIRTUAL_PLANE *virt_plane,sh_page_VIRTUAL_ADDRESS *address,sh_uint32 pages_count);
#endif

View File

@@ -0,0 +1,103 @@
// SPDX-License-Identifier: MPL-2.0
#ifndef SH_LIB_PEZ_DEBUG_H
#define SH_LIB_PEZ_DEBUG_H
#include "std/type.h"
#include "std/status.h"
#define SH_PEZ_DEBUG_ALLOC_HEADER 0
#define SH_PEZ_DEBUG_FREE_HEADER 1
#define SH_PEZ_DEBUG_SIZE_LIST_HEADER 2
#define SH_PEZ_DEBUG_BOUNDARY_RADIX_HEADER 3
#define SH_PEZ_DEBUG_BITMAP_REGIONS_HEADER 4
#define SH_PEZ_DEBUG_SIZE_LIST_BLOCK_HEADER 5
// Debug payload describing one Pez allocation (sent via sh_pez_debug_send_alloc).
#pragma pack(1)
typedef struct {
sh_uint32 size_asked;
sh_uint32 found_region_start_index;
sh_uint32 found_region_size;
sh_bool is_exact_fit;
sh_uint32 remaining_size;
sh_uint32 new_start;
sh_uint32 reg_idx;
} sh_pez_debug_ALLOC;
#pragma pack()
// Debug payload describing one Pez free, including neighbor coalescing details
// (sent via sh_pez_debug_send_free).
#pragma pack(1)
typedef struct {
sh_uint32 region_start_freeed;
sh_uint32 size_freeed;
sh_uint32 left_region_idx;
sh_uint32 right_region_idx;
sh_uint32 left_region_size;
sh_uint32 right_region_size;
sh_uint32 left_region_start;
sh_uint32 right_region_start;
sh_uint32 final_inserted_region_idx;
sh_uint32 final_inserted_region_start;
sh_uint32 final_inserted_region_size;
sh_bool was_right_region_object_freeed;
sh_bool was_new_region_object_allocated;
sh_uint32 new_reg_obj_start;
sh_uint32 new_reg_obj_size;
} sh_pez_debug_FREE;
#pragma pack()
// One region entry of a size-list snapshot.
#pragma pack(1)
typedef struct {
sh_uint32 start;
sh_uint32 size;
sh_uint32 idx;
sh_uint32 next_idx;
} sh_pez_debug_REGION_SIZE_LIST;
#pragma pack()
// Snapshot of one size list; capped at 512 regions.
#pragma pack(1)
typedef struct {
sh_uint32 size;
sh_uint32 count;
sh_pez_debug_REGION_SIZE_LIST regions[512];
} sh_pez_debug_SIZE_LIST;
#pragma pack()
// One region entry of a boundary-radix snapshot (start and end boundary records).
#pragma pack(1)
typedef struct {
sh_uint32 pos_start;
sh_uint32 idx_start;
sh_uint32 prev_start;
sh_uint32 pos_end;
sh_uint32 idx_end;
sh_uint32 prev_end;
} sh_pez_debug_REGION_BOUNDARY;
#pragma pack()
// Snapshot of the boundary radix tree; capped at 512 regions.
#pragma pack(1)
typedef struct {
sh_uint32 count;
sh_pez_debug_REGION_BOUNDARY regions[512];
} sh_pez_debug_BOUNDARY_RADIX;
#pragma pack()
// One region entry derived from the physical bitmap.
#pragma pack(1)
typedef struct {
sh_uint32 start;
sh_uint32 size;
} sh_pez_debug_REGION_BITMAP;
#pragma pack()
// Snapshot of bitmap-derived regions; capped at 512 regions.
#pragma pack(1)
typedef struct {
sh_uint32 count;
sh_pez_debug_REGION_BITMAP regions[512];
} sh_pez_debug_BITMAP;
#pragma pack()
// Send an alloc payload
SH_STATUS sh_pez_debug_send_alloc(sh_pez_debug_ALLOC *alloc);
// Send a free payload
SH_STATUS sh_pez_debug_send_free(sh_pez_debug_FREE *free_payload);
// Send a size list payload. Sending the size list radix header must be done prior to any size list being sent. It consists of SH_PEZ_DEBUG_SIZE_LIST_BLOCK_HEADER and the count of size lists
SH_STATUS sh_pez_debug_send_size_list(sh_pez_debug_SIZE_LIST *size_list);
// Send a boundary radix payload
SH_STATUS sh_pez_debug_send_boundary_radix(sh_pez_debug_BOUNDARY_RADIX *boundary_radix);
// Send a bitmap payload
SH_STATUS sh_pez_debug_send_bitmap(sh_pez_debug_BITMAP *bitmap);
#endif // SH_LIB_PEZ_DEBUG_H — the include-guard #endif previously sat BEFORE these prototypes, leaving them outside the guard; it now closes the whole header. The `free` parameter was also renamed so it no longer shadows stdlib free().

View File

@@ -0,0 +1,31 @@
// SPDX-License-Identifier: MPL-2.0
#ifndef SH_LIB_RADIX_H
#define SH_LIB_RADIX_H
#include "std/type.h"
#include "memory/page.h"
struct sh_slab_radix_node_SLAB_ALLOCATOR;
#define SH_RADIX_NODE_SIZE_BYTES 128
// Radix node structure.
// 16-way node: each ptr[i] holds either a child node pointer or a stored value.
// Sixteen 8-byte slots give the 128-byte node size in SH_RADIX_NODE_SIZE_BYTES.
typedef struct {
sh_page_VIRTUAL_ADDRESS ptr[16];
} sh_radix_NODE;
// Radix tree structure.
typedef struct {
sh_radix_NODE *root_node; // top-level node of the tree
sh_uint8 depth; // number of 4-bit key levels; sh_radix_tree_init fails if greater than 16
} sh_radix_TREE;
// Return the value at the indicated index. Return SH_STATUS_NOT_FOUND if the index points to an empty ptr. Automatically fills the 16 higher bits in the returned value if index=0, regardless of whether the provided node is an intermediary node or a leaf
SH_STATUS sh_radix_node_read_value(sh_radix_NODE *node,sh_uint8 index,sh_page_VIRTUAL_ADDRESS* value);
// Modify the value at the indicated index. Update the bitmap accordingly, all non-zero value corresponding to 1 in the bitmap, otherwise 0.
SH_STATUS sh_radix_node_set_value(struct sh_slab_radix_node_SLAB_ALLOCATOR *alloc,sh_radix_NODE *node,sh_uint8 index,sh_page_VIRTUAL_ADDRESS value);
// Initialize a radix tree. Fail if depth is greater than 16
SH_STATUS sh_radix_tree_init(struct sh_slab_radix_node_SLAB_ALLOCATOR *alloc,sh_page_PAGE_TABLE_POOL *ptp,sh_radix_TREE *tree,sh_uint8 depth);
// Search in a straight line for a key inside the tree. Stop and return SH_STATUS_NOT_FOUND as soon as it hit a empty ptr where there should be one
SH_STATUS sh_radix_tree_get_value(sh_radix_TREE *tree,sh_uint64 key,sh_page_VIRTUAL_ADDRESS *value);
// Insert a value inside the tree. Can allocate new nodes if necessary. Will overwrite previous value if there was one already inserted. Automatically update bitmap on his path
SH_STATUS sh_radix_tree_insert_value(struct sh_slab_radix_node_SLAB_ALLOCATOR *alloc,sh_page_PAGE_TABLE_POOL *ptp,sh_radix_TREE *tree,sh_uint64 key,sh_page_VIRTUAL_ADDRESS value);
// Delete a value and deallocate all nodes (including the leaf) that form a path to it if this deletion makes them empty
SH_STATUS sh_radix_tree_delete_value(struct sh_slab_radix_node_SLAB_ALLOCATOR *alloc,sh_radix_TREE *tree,sh_uint64 key);
// Return the value that has the smallest key equal or greater than the provided key. Can't allocate new nodes.
SH_STATUS sh_radix_tree_search_smallest_min_bound(struct sh_slab_radix_node_SLAB_ALLOCATOR *alloc,sh_radix_TREE *tree,sh_uint64 lower_bound_key,sh_page_VIRTUAL_ADDRESS *value);
#endif

View File

@@ -0,0 +1,18 @@
// SPDX-License-Identifier: MPL-2.0
#ifndef SH_LIB_RING_H
#define SH_LIB_RING_H
#include "std/type.h"
#include "std/status.h"
// Ring buffer header structure.
// Describes a circular byte buffer; the payload storage is separate, pointed to by data_start.
typedef struct {
sh_uint32 head; // NOTE(review): producer/consumer roles of head vs tail are not visible in this header — confirm in the implementation
sh_uint32 tail;
sh_uint32 buffer_bytes_size; // capacity of the data area, in bytes
sh_uint8* data_start; // start of the backing byte storage
sh_uint64 total_bytes_written; // running total of bytes ever written to the ring
} sh_ring_RING_BUFFER_HEADER;
// Write a byte into the provided ring buffer
SH_STATUS sh_ring_write_byte(sh_ring_RING_BUFFER_HEADER *ring_buffer,sh_uint8 byte);
// Write a null terminated string into the provided ring buffer
SH_STATUS sh_ring_write_string(sh_ring_RING_BUFFER_HEADER *ring_buffer,char *string);
#endif

View File

@@ -0,0 +1,6 @@
// SPDX-License-Identifier: MPL-2.0
// This file serves to include all types of slab allocators
#include "memory/slabs/slab_reg_phys.h"
#include "memory/slabs/slab_reg_virt.h"
#include "memory/slabs/slab_radix_node.h"
#include "memory/slabs/slab_generic.h"

View File

@@ -0,0 +1,60 @@
// SPDX-License-Identifier: MPL-2.0
// This is the implementation for the slab allocator for generic sizes
#ifndef SH_LIB_SLAB_GENERIC_H
#define SH_LIB_SLAB_GENERIC_H
#include "memory/page.h"
#include "std/status.h"
#include "std/type.h"
#include "memory/pba.h"
#define SH_SLAB_GENERIC_SLAB_DATA_PAGES {1,2,4,8,16,32,64,128}
#define SH_SLAB_GENERIC_OBJECT_SIZE_BYTES {8,16,32,64,128,256,512,1024}
#define SH_SLAB_GENERIC_OBJECTS_PER_SLAB 512
#define SH_SLAB_GENERIC_ACTUAL_OBJECTS_PER_SLAB {496,504,508,510,511,511,511,511}
#define SH_SLAB_GENERIC_SLAB_BITMAP_INIT {0xFFFF,0xFF,0xF,0x3,1,1,1,1}
#define SH_SLAB_GENERIC_SLAB_BITMAP_SIZE_BYTES 64
#define SH_SLAB_GENERIC_NULL_REF (sh_uint64)0
#define SH_SLAB_GENERIC_SLAB_SIG {'S','h','S','l','G','e','n','e'}
#define SH_SLAB_GENERIC_MAGIC 0x656E65476C536853ULL // little endian
// Generic slab structure.
// The header lives at the start of the slab's own memory, so the first object
// slots it overlays are pre-marked used (see SH_SLAB_GENERIC_SLAB_BITMAP_INIT).
#pragma pack(1)
typedef struct sh_slab_generic_SLAB {
sh_uint8 sig[8]; // SH_SLAB_GENERIC_SLAB_SIG signature, used to validate a slab
sh_uint16 used_count; // allocated objects in this slab
sh_uint64 slab_index;
struct sh_slab_generic_SLAB* next_slab; // all-slabs doubly linked list
struct sh_slab_generic_SLAB* prev_slab;
struct sh_slab_generic_SLAB* next_partial; // doubly linked list of slabs with free slots
struct sh_slab_generic_SLAB* prev_partial;
sh_uint8 padding[14];
sh_uint64 free_bitmap[SH_SLAB_GENERIC_SLAB_BITMAP_SIZE_BYTES/8]; // The first object slots are marked unavailable because the header and bitmap occupy them; how many depends on the object size
} sh_slab_generic_SLAB;
#pragma pack()
// Generic slab allocator structure (previous comment said "Radix node" — copy-paste leftover; this is the generic allocator).
struct sh_slab_generic_SLAB_ALLOCATOR {
sh_slab_generic_SLAB* first_slab; // head of the all-slabs list
sh_slab_generic_SLAB* partial_head; // head of the slabs-with-free-slots list
sh_uint64 level; // size-class level — NOTE(review): presumably indexes the 8-entry SH_SLAB_GENERIC_* tables; confirm
sh_uint64 slab_count; // slabs currently owned by this allocator
sh_uint64 object_size_bytes; // per-object size for this level
sh_uint64 object_per_slab; // nominal slots per slab (SH_SLAB_GENERIC_OBJECTS_PER_SLAB)
sh_uint64 actual_object_per_slab; // usable slots after header/bitmap overhead
sh_uint64 bitmap_init; // initial free_bitmap word marking the header-occupied slots
sh_uint64 slab_pages_count; // pages per slab for this level
sh_pba_PAGE_BLOCK_ALLOCATOR* pba; // page block allocator backing new slabs
};
typedef sh_uint16 sh_slab_generic_OBJECT_INDEX_IN_SLAB;
// Initialize slab allocator structure. Does not allocate any slab
SH_STATUS sh_slab_generic_alloc_init(sh_uint8 level,struct sh_slab_generic_SLAB_ALLOCATOR* slab_alloc,sh_pba_PAGE_BLOCK_ALLOCATOR *pba);
// Allocate a new slab, initialize it and put it into the allocator. If new slab isn't the first to be allocated, push it on the partial slab list
SH_STATUS sh_slab_generic_add_slab(struct sh_slab_generic_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL *ptp,sh_slab_generic_SLAB** out_slab);
// Obtain a pointer to the first partial slab. Does not scan the slabs. Allocate a new slab if necessary
SH_STATUS sh_slab_generic_get_partial_slab(struct sh_slab_generic_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL* ptp,sh_slab_generic_SLAB** found_slab);
// Rescan all the slabs to rebuild the partial list in case of doubt. Does not modify alloc->slab_count, any slab->free_bitmap or any slab->nodes
SH_STATUS sh_slab_generic_scan_slabs(struct sh_slab_generic_SLAB_ALLOCATOR* alloc);
// Return a valid pointer to an empty object slot as well as the object index in the corresponding slab. Slab allocation is automated by sh_slab_generic_get_partial_slab
SH_STATUS sh_slab_generic_find_free_object(struct sh_slab_generic_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL *ptp,void** out,sh_slab_generic_OBJECT_INDEX_IN_SLAB* index_in_slab);
// Allocate one new object. Return a pointer to the struct of the new object. Since it calls sh_slab_generic_find_free_object, it can allocate a new slab
SH_STATUS sh_slab_generic_alloc(struct sh_slab_generic_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL *ptp,void** out_obj);
// Deallocate one generic object provided as a pointer.
SH_STATUS sh_slab_generic_dealloc(struct sh_slab_generic_SLAB_ALLOCATOR* alloc,void *object_ptr);
#endif

View File

@@ -0,0 +1,59 @@
// SPDX-License-Identifier: MPL-2.0
// This is the implementation for the slab allocator for sh_radix_NODE
#ifndef SH_LIB_SLAB_RADIX_NODE_H
#define SH_LIB_SLAB_RADIX_NODE_H
#include "memory/page.h"
#include "std/status.h"
#include "std/type.h"
#include "memory/pez/radix.h"
#include "memory/pba.h"
#define SH_SLAB_RADIX_NODE_SLAB_DATA_PAGES (sh_uint64)32
#define SH_SLAB_RADIX_NODE_SLAB_DATA_SIZE_BYTES (sh_uint64)(SH_SLAB_RADIX_NODE_SLAB_DATA_PAGES*4096)
#define SH_SLAB_RADIX_NODE_OBJECT_SIZE_BYTES (sh_uint64)128
#define SH_SLAB_RADIX_NODE_OBJECTS_PER_SLAB (sh_uint64)1024
#define SH_SLAB_RADIX_NODE_ACTUAL_OBJECTS_PER_SLAB (sh_uint64)1006
#define SH_SLAB_RADIX_NODE_SLAB_BITMAP_SIZE_BYTES (sh_uint64)(SH_SLAB_RADIX_NODE_OBJECTS_PER_SLAB/8)
#define SH_SLAB_RADIX_NODE_NULL_REF (sh_uint64)0
#define SH_SLAB_RADIX_NODE_SLAB_SIG {'S','h','S','l','R','a','N','o'}
#define SH_SLAB_RADIX_NODE_MAGIC 0x6F4E61526C536853ULL // little endian
// Radix node slab structure.
// Occupies SH_SLAB_RADIX_NODE_SLAB_DATA_PAGES (32) pages; the header, free bitmap
// and per-node bitmaps overlay the space of the first object slots.
#pragma pack(1)
typedef struct sh_slab_radix_node_SLAB {
sh_uint8 sig[8]; // SH_SLAB_RADIX_NODE_SLAB_SIG signature, used to validate a slab
sh_uint16 used_count; // allocated nodes in this slab
sh_uint64 slab_index;
struct sh_slab_radix_node_SLAB* next_slab; // all-slabs doubly linked list
struct sh_slab_radix_node_SLAB* prev_slab;
struct sh_slab_radix_node_SLAB* next_partial; // doubly linked list of slabs with free slots
struct sh_slab_radix_node_SLAB* prev_partial;
sh_uint8 padding[78];
sh_uint64 free_bitmap[16]; // The first 18 objects are marked unavailable because the header and bitmaps occupy their space (1024 - 1006 = 18)
sh_uint16 node_bitmap[1024]; // per-node 16-bit bitmap, exposed by sh_slab_radix_node_get_node_bitmap
sh_radix_NODE nodes[SH_SLAB_RADIX_NODE_ACTUAL_OBJECTS_PER_SLAB];
} sh_slab_radix_node_SLAB;
#pragma pack()
#pragma pack()
// Radix node slab allocator structure.
struct sh_slab_radix_node_SLAB_ALLOCATOR {
sh_slab_radix_node_SLAB* first_slab; // head of the all-slabs list
sh_slab_radix_node_SLAB* partial_head; // head of the slabs-with-free-slots list
sh_uint64 slab_count; // slabs currently owned by this allocator
sh_pba_PAGE_BLOCK_ALLOCATOR* pba; // page block allocator backing new slabs
};
typedef sh_uint16 sh_slab_radix_node_NODE_INDEX_IN_SLAB;
// Initialize slab allocator structure. Does not allocate any slab
SH_STATUS sh_slab_radix_node_alloc_init(struct sh_slab_radix_node_SLAB_ALLOCATOR* slab_alloc,sh_pba_PAGE_BLOCK_ALLOCATOR *pba);
// Allocate a new slab, initialize it and put it into the allocator. If new slab isn't the first to be allocated, push it on the partial slab list
SH_STATUS sh_slab_radix_node_add_slab(struct sh_slab_radix_node_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL *ptp,sh_slab_radix_node_SLAB** out_slab);
// Obtain a pointer to the first partial slab. Does not scan the slabs. Allocate a new slab if necessary
SH_STATUS sh_slab_radix_node_get_partial_slab(struct sh_slab_radix_node_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL* ptp,sh_slab_radix_node_SLAB** found_slab);
// Rescan all the slabs to set all metadata in case of doubt. Does not modify alloc->slab_count or <any slab>->nodes
SH_STATUS sh_slab_radix_node_scan_slabs(struct sh_slab_radix_node_SLAB_ALLOCATOR* alloc);
// Return a valid pointer to an empty object slot as well as the object index in the corresponding slab. Slab allocation is automated by sh_slab_radix_node_get_partial_slab
SH_STATUS sh_slab_radix_node_find_free_object(struct sh_slab_radix_node_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL *ptp,sh_radix_NODE** out,sh_slab_radix_node_NODE_INDEX_IN_SLAB* index_in_slab);
// Allocate one new radix node object. Return a pointer to the struct of the new object. Since it calls sh_slab_radix_node_find_free_object, it can allocate a new slab
SH_STATUS sh_slab_radix_node_alloc(struct sh_slab_radix_node_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL *ptp,sh_radix_NODE** out_obj);
// Deallocate one radix node object provided as a pointer.
SH_STATUS sh_slab_radix_node_dealloc(struct sh_slab_radix_node_SLAB_ALLOCATOR* alloc,sh_radix_NODE *object_ptr);
// Return a pointer to the bitmap of a node
sh_uint16 *sh_slab_radix_node_get_node_bitmap(struct sh_slab_radix_node_SLAB_ALLOCATOR* alloc,sh_radix_NODE *object_ptr);
#endif

View File

@@ -0,0 +1,58 @@
// SPDX-License-Identifier: MPL-2.0
// This is the implementation for the slab allocator for sh_pez_REGION_PHYSICAL_OBJECT
#ifndef SH_LIB_SLAB_REG_PHYS_H
#define SH_LIB_SLAB_REG_PHYS_H
#include "std/type.h"
#include "std/status.h"
#include "memory/page.h"
#include "memory/vmem_layout.h"
#define SH_SLAB_REG_PHYS_DATA_VA SH_VMEM_LAYOUT_SLAB_REG_PHYS_VA
#define SH_SLAB_REG_PHYS_MAX_SLAB (sh_uint64)(16*1024)
#define SH_SLAB_REG_PHYS_SLAB_DATA_PAGES (sh_uint64)3
#define SH_SLAB_REG_PHYS_SLAB_DATA_SIZE_BYTES (sh_uint64)(SH_SLAB_REG_PHYS_SLAB_DATA_PAGES*4096)
#define SH_SLAB_REG_PHYS_OBJECT_SIZE_BYTES (sh_uint64)12
#define SH_SLAB_REG_PHYS_OBJECTS_PER_SLAB (sh_uint64)1024
#define SH_SLAB_REG_PHYS_ACTUAL_OBJECTS_PER_SLAB (sh_uint64)1023
#define SH_SLAB_REG_PHYS_SLAB_BITMAP_SIZE_BYTES (sh_uint64)(SH_SLAB_REG_PHYS_OBJECTS_PER_SLAB/8)
#define SH_SLAB_REG_PHYS_NULL_REF (sh_uint64)0
// Physical region slab structure.
// Header record kept out-of-line in the allocator's slabs_header array; the
// object storage itself lives separately in slabs_data.
typedef struct sh_slab_reg_phys_SLAB_STRUCT {
sh_uint16 used_count; // allocated objects in this slab
sh_uint16 slab_index; // this slab's index (upper bits of an object ref, see SH_SLAB_REG_PHYS_REF_SLAB)
struct sh_slab_reg_phys_SLAB_STRUCT* next; // NOTE(review): presumably partial-list links — confirm
struct sh_slab_reg_phys_SLAB_STRUCT* prev;
sh_uint64 free_bitmap[16]; // 1 bit per object slot (1024 slots)
} sh_slab_reg_phys_SLAB_STRUCT;
#define SH_SLAB_REG_PHYS_HEADER_LIST_SIZE_BYTES SH_SLAB_REG_PHYS_MAX_SLAB*sizeof(sh_slab_reg_phys_SLAB_STRUCT)
// Physical region object slab allocator structure.
typedef struct {
sh_slab_reg_phys_SLAB_STRUCT* slabs_header; // array of slab header records (up to SH_SLAB_REG_PHYS_MAX_SLAB)
sh_uint8* slabs_data; // object storage area (SH_SLAB_REG_PHYS_DATA_VA, SH_SLAB_REG_PHYS_SLAB_DATA_SIZE_BYTES per slab)
sh_uint16 max_slabs; // slab capacity
sh_uint16 slab_count; // slabs created so far
sh_slab_reg_phys_SLAB_STRUCT* partial_head; // head of the slabs-with-free-slots list
} sh_slab_reg_phys_SLAB_ALLOCATOR;
typedef sh_uint32 sh_slab_reg_phys_OBJECT_INDEX;
// Return slab index
#define SH_SLAB_REG_PHYS_REF_SLAB(ref) (sh_uint16)((ref)>>10)
// Return object index inside a slab
#define SH_SLAB_REG_PHYS_REF_OBJECT(ref) (sh_uint16)((ref) & 0x3FF)
// Make a valid object index from a slab index and an object index inside this slab
#define SH_SLAB_REG_PHYS_MAKE_REF(slab,obj) (((sh_slab_reg_phys_OBJECT_INDEX)(slab)<<10) | (sh_slab_reg_phys_OBJECT_INDEX)(obj))
// Initialize slab allocator structure. Does not allocate any slabs
SH_STATUS sh_slab_reg_phys_alloc_init(sh_slab_reg_phys_SLAB_ALLOCATOR* slab_alloc,sh_page_PAGE_TABLE_POOL *ptp);
// Allocate a new slab, initialize it and put it into the allocator. If new slab isn't the first to be allocated, push it on the partial slab list
SH_STATUS sh_slab_reg_phys_add_slab(sh_slab_reg_phys_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL *ptp,sh_slab_reg_phys_SLAB_STRUCT** out_slab);
// Obtain a pointer to the first partial slab. Does not scan the slabs. Allocate a new slab if necessary
SH_STATUS sh_slab_reg_phys_get_partial_slab(sh_slab_reg_phys_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL* ptp,sh_slab_reg_phys_SLAB_STRUCT** found_slab);
// Rescan all the slabs to set all metadata in case of doubt. Does not modify alloc->slab_count, alloc->slabs_data
SH_STATUS sh_slab_reg_phys_scan_slabs(sh_slab_reg_phys_SLAB_ALLOCATOR* alloc);
// Return a valid 24-bit index to an empty object slot. Slab allocation is automated by sh_slab_reg_phys_get_partial_slab
SH_STATUS sh_slab_reg_phys_find_free_object(sh_slab_reg_phys_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL *ptp,sh_slab_reg_phys_OBJECT_INDEX* out_ref);
// Return a pointer to referenced physical region object
void* sh_slab_reg_phys_ref_to_ptr(sh_slab_reg_phys_SLAB_ALLOCATOR* alloc,sh_slab_reg_phys_OBJECT_INDEX ref);
// Allocate one new physical region object. Return an index to access and deallocate the object. Since it calls sh_slab_reg_phys_find_free_object, it can allocate a new slab
SH_STATUS sh_slab_reg_phys_alloc(sh_slab_reg_phys_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL *ptp,sh_slab_reg_phys_OBJECT_INDEX* out_index);
// Deallocate one physical region object provided as an index.
SH_STATUS sh_slab_reg_phys_dealloc(sh_slab_reg_phys_SLAB_ALLOCATOR* alloc,sh_slab_reg_phys_OBJECT_INDEX index);
#endif

View File

@@ -0,0 +1,58 @@
// SPDX-License-Identifier: MPL-2.0
// This is the header for the slab allocator for sh_pez_REGION_VIRTUAL_OBJECT
#ifndef SH_LIB_SLAB_REG_VIRT_H
#define SH_LIB_SLAB_REG_VIRT_H
#include "std/type.h"
#include "std/status.h"
#include "memory/page.h"
#include "memory/vmem_layout.h"
// Base VA of the backing data area for all virtual region slabs (from the static layout).
#define SH_SLAB_REG_VIRT_DATA_VA SH_VMEM_LAYOUT_SLAB_REG_VIRT_VA
// Hard cap on the number of slabs this allocator can manage (2^19).
#define SH_SLAB_REG_VIRT_MAX_SLAB (sh_uint64)(512*1024)
// Pages of object storage per slab.
#define SH_SLAB_REG_VIRT_SLAB_DATA_PAGES (sh_uint64)3
// Bytes of object storage per slab: 3 pages * 4096 = 12288 = 1024 objects * 12 bytes.
#define SH_SLAB_REG_VIRT_SLAB_DATA_SIZE_BYTES (sh_uint64)(SH_SLAB_REG_VIRT_SLAB_DATA_PAGES*4096)
// Size of one virtual region object in bytes.
#define SH_SLAB_REG_VIRT_OBJECT_SIZE_BYTES (sh_uint64)12
// Object slots per slab; sizes the per-slab free bitmap.
#define SH_SLAB_REG_VIRT_OBJECTS_PER_SLAB (sh_uint64)1024
// Usable objects per slab. Presumably one slot is kept reserved so that reference 0 can
// serve as SH_SLAB_REG_VIRT_NULL_REF — TODO confirm against the implementation.
#define SH_SLAB_REG_VIRT_ACTUAL_OBJECTS_PER_SLAB (sh_uint64)1023
// Free-bitmap size per slab: 1024 bits -> 128 bytes (16 x sh_uint64).
#define SH_SLAB_REG_VIRT_SLAB_BITMAP_SIZE_BYTES (sh_uint64)(SH_SLAB_REG_VIRT_OBJECTS_PER_SLAB/8)
// Reserved "no object" reference value.
// NOTE(review): cast is sh_uint64 while sh_slab_reg_virt_OBJECT_INDEX is sh_uint32 —
// harmless for the value 0, but inconsistent with the reference type; consider unifying.
#define SH_SLAB_REG_VIRT_NULL_REF (sh_uint64)0
// Virtual region slab header. One header exists per slab; the 12-byte objects themselves
// live in the data area at SH_SLAB_REG_VIRT_DATA_VA, located via slab_index.
typedef struct sh_slab_reg_virt_SLAB_STRUCT {
// Count of object slots currently in use in this slab — presumably maintained by
// alloc/dealloc and rebuilt by sh_slab_reg_virt_scan_slabs; TODO confirm in the .c file.
sh_uint16 used_count;
// Index of this slab, used to address its data area and to build object references.
sh_uint32 slab_index;
// Links for the doubly linked partial-slab list (see partial_head in the allocator).
struct sh_slab_reg_virt_SLAB_STRUCT* next;
struct sh_slab_reg_virt_SLAB_STRUCT* prev;
// One bit per object slot: 16 x 64 = 1024 bits = SH_SLAB_REG_VIRT_OBJECTS_PER_SLAB.
sh_uint64 free_bitmap[16];
} sh_slab_reg_virt_SLAB_STRUCT;
// Total bytes needed for the full array of slab headers.
// Fix: expansion parenthesized so the macro is safe inside larger expressions (CERT PRE02-C).
#define SH_SLAB_REG_VIRT_HEADER_LIST_SIZE_BYTES (SH_SLAB_REG_VIRT_MAX_SLAB*sizeof(sh_slab_reg_virt_SLAB_STRUCT))
// Virtual region object slab allocator structure
typedef struct {
// Array of slab headers (total capacity: SH_SLAB_REG_VIRT_HEADER_LIST_SIZE_BYTES).
sh_slab_reg_virt_SLAB_STRUCT* slabs_header;
// Base of the object storage area the slabs carve their objects from.
sh_uint8* slabs_data;
// Maximum number of slabs this allocator may create.
sh_uint32 max_slabs;
// Number of slabs currently allocated.
sh_uint32 slab_count;
// Head of the doubly linked list of partially filled slabs.
sh_slab_reg_virt_SLAB_STRUCT* partial_head;
} sh_slab_reg_virt_SLAB_ALLOCATOR;
// Packed object reference: (slab_index << 10) | object_index (see the REF macros below).
typedef sh_uint32 sh_slab_reg_virt_OBJECT_INDEX;
// Extract the slab index from an object reference (bits 10 and up).
#define SH_SLAB_REG_VIRT_REF_SLAB(ref) (sh_uint32)((ref)>>10)
// Extract the object index inside a slab from an object reference (low 10 bits).
#define SH_SLAB_REG_VIRT_REF_OBJECT(ref) (sh_uint16)((ref) & 0x3FF)
// Make a valid object reference from a slab index and an object index inside this slab.
// NOTE(review): obj is not masked to 10 bits here; callers must guarantee obj < 1024 or
// the slab bits of the reference get corrupted — confirm at call sites.
#define SH_SLAB_REG_VIRT_MAKE_REF(slab,obj) (((sh_slab_reg_virt_OBJECT_INDEX)(slab)<<10) | (sh_slab_reg_virt_OBJECT_INDEX)(obj))
// Initialize the slab allocator structure. Does not allocate any slabs.
SH_STATUS sh_slab_reg_virt_alloc_init(sh_slab_reg_virt_SLAB_ALLOCATOR* slab_alloc,sh_page_PAGE_TABLE_POOL *ptp);
// Allocate a new slab, initialize it and put it into the allocator. If the new slab isn't the first one to be allocated, push it onto the partial slab list.
SH_STATUS sh_slab_reg_virt_add_slab(sh_slab_reg_virt_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL *ptp,sh_slab_reg_virt_SLAB_STRUCT** out_slab);
// Obtain a pointer to the first partial slab. Does not scan the slabs. Allocates a new slab if necessary.
SH_STATUS sh_slab_reg_virt_get_partial_slab(sh_slab_reg_virt_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL* ptp,sh_slab_reg_virt_SLAB_STRUCT** found_slab);
// Rescan all the slabs to rebuild all metadata in case of doubt. Does not modify alloc->slab_count or alloc->slabs_data.
SH_STATUS sh_slab_reg_virt_scan_slabs(sh_slab_reg_virt_SLAB_ALLOCATOR* alloc);
// Return a valid 29-bit reference (19 slab bits + 10 object bits) to an empty object slot. Slab allocation is automated by sh_slab_reg_virt_get_partial_slab.
SH_STATUS sh_slab_reg_virt_find_free_object(sh_slab_reg_virt_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL *ptp,sh_slab_reg_virt_OBJECT_INDEX* out_ref);
// Return a pointer to the referenced virtual region object.
void* sh_slab_reg_virt_ref_to_ptr(sh_slab_reg_virt_SLAB_ALLOCATOR* alloc,sh_slab_reg_virt_OBJECT_INDEX ref);
// Allocate one new virtual region object. Returns a reference used to access and deallocate the object. Since it calls sh_slab_reg_virt_find_free_object, it can allocate a new slab.
SH_STATUS sh_slab_reg_virt_alloc(sh_slab_reg_virt_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL *ptp,sh_slab_reg_virt_OBJECT_INDEX* out_index);
// Deallocate one virtual region object provided as a reference.
SH_STATUS sh_slab_reg_virt_dealloc(sh_slab_reg_virt_SLAB_ALLOCATOR* alloc,sh_slab_reg_virt_OBJECT_INDEX index);
#endif

View File

@@ -0,0 +1,93 @@
// SPDX-License-Identifier: MPL-2.0
#ifndef SH_LIB_VMEM_LAYOUT_H
#define SH_LIB_VMEM_LAYOUT_H
// HOW TO USE THIS FILE
// This file contains the VA, the size in bytes and sometimes the end of the virtual memory
// regions that the kernel is known to search into at boot time or allocate things at.
// This file is checked by an automated script to ensure there is no overlap.
// Any macro ending with _VA creates a new virtual region for the script. The script will look
// for the size of this virtual region in another macro that starts with the same prefix and
// ends with _SIZE_BYTES.
// If a macro ending with _VA doesn't have a corresponding macro ending with _SIZE_BYTES, the
// script will trigger an error and the kernel compilation will fail.
// If a macro ending with _SIZE_BYTES doesn't have a corresponding macro ending with _VA, the
// script will ignore it.
// The start of each virtual region must be aligned to 4096 bytes and the size must be provided
// in bytes.
// Any overlapping virtual regions will trigger a compilation error.
// Consider this file the source of truth for everything related to static virtual
// regions.
// Any macro that doesn't end with _VA or _SIZE_BYTES, or that doesn't correspond to the
// behaviour described above, will be ignored.
// TEMPORARY STRUCTURE
// The base for the boot configuration block
#define SH_VMEM_LAYOUT_BOOT_CONFIG_VA 0x00180000
// The size for the boot configuration block
#define SH_VMEM_LAYOUT_BOOT_CONFIG_SIZE_BYTES 4096
// The base for the memory map
#define SH_VMEM_LAYOUT_MEMORY_MAP_VA 0x00190000
// The size for the memory map (16 pages).
// Fix: expansion parenthesized so the macro is safe inside larger expressions.
#define SH_VMEM_LAYOUT_MEMORY_MAP_SIZE_BYTES (16*4096)
// KERNEL HEAP
// The base for the big allocations region (16 TiB).
// Fix: expansions below parenthesized so the macros are safe inside larger expressions.
#define SH_VMEM_LAYOUT_HEAP_BIG_VA (16LL*1024LL*1024LL*1024LL*1024LL)
// The size for the big allocations region (16 TiB minus two pages, ending just below
// SH_VMEM_LAYOUT_HEAP_SLAB_LEVEL_0_VA at 32 TiB)
#define SH_VMEM_LAYOUT_HEAP_BIG_SIZE_BYTES ((0xFFFFFFFELL)*4096LL)
// The base for all generic slab allocators for the heap, spaced by
// SH_VMEM_LAYOUT_HEAP_SLAB_SPACING (12 TiB) each
#define SH_VMEM_LAYOUT_HEAP_SLAB_LEVEL_0_VA 0x0000200000000000
#define SH_VMEM_LAYOUT_HEAP_SLAB_LEVEL_1_VA 0x00002C0000000000
#define SH_VMEM_LAYOUT_HEAP_SLAB_LEVEL_2_VA 0x0000380000000000
#define SH_VMEM_LAYOUT_HEAP_SLAB_LEVEL_3_VA 0x0000440000000000
#define SH_VMEM_LAYOUT_HEAP_SLAB_LEVEL_4_VA 0x0000500000000000
#define SH_VMEM_LAYOUT_HEAP_SLAB_LEVEL_5_VA 0x00005C0000000000
#define SH_VMEM_LAYOUT_HEAP_SLAB_LEVEL_6_VA 0x0000680000000000
#define SH_VMEM_LAYOUT_HEAP_SLAB_LEVEL_7_VA 0x0000740000000000
// The size for all generic slab allocators for the heap (one page below the 12 TiB
// spacing, so consecutive levels never overlap)
#define SH_VMEM_LAYOUT_HEAP_SLAB_LEVEL_0_SIZE_BYTES (0xBFFFFFFFLL*4096LL)
#define SH_VMEM_LAYOUT_HEAP_SLAB_LEVEL_1_SIZE_BYTES (0xBFFFFFFFLL*4096LL)
#define SH_VMEM_LAYOUT_HEAP_SLAB_LEVEL_2_SIZE_BYTES (0xBFFFFFFFLL*4096LL)
#define SH_VMEM_LAYOUT_HEAP_SLAB_LEVEL_3_SIZE_BYTES (0xBFFFFFFFLL*4096LL)
#define SH_VMEM_LAYOUT_HEAP_SLAB_LEVEL_4_SIZE_BYTES (0xBFFFFFFFLL*4096LL)
#define SH_VMEM_LAYOUT_HEAP_SLAB_LEVEL_5_SIZE_BYTES (0xBFFFFFFFLL*4096LL)
#define SH_VMEM_LAYOUT_HEAP_SLAB_LEVEL_6_SIZE_BYTES (0xBFFFFFFFLL*4096LL)
#define SH_VMEM_LAYOUT_HEAP_SLAB_LEVEL_7_SIZE_BYTES (0xBFFFFFFFLL*4096LL)
// The spacing in bytes between consecutive slab allocator bases (12 TiB)
#define SH_VMEM_LAYOUT_HEAP_SLAB_SPACING 0xC0000000000
// KERNEL AND STACKS AREA
// The base for the kernel image area
#define SH_VMEM_LAYOUT_KERNEL_IMAGE_AREA_VA 0xFFFF800000000000ULL
// The size for the kernel image area (up to, but not including, 0xFFFF900000000000).
// Fix: expansion parenthesized so the macro is safe inside larger expressions.
#define SH_VMEM_LAYOUT_KERNEL_IMAGE_AREA_SIZE_BYTES (0xFFFF900000000000ULL-1-0xFFFF800000000000ULL)
// The base for the stack area
#define SH_VMEM_LAYOUT_STACK_AREA_VA 0xFFFFF00000000000ULL
// The size for the stack area (was mislabelled "kernel image area")
#define SH_VMEM_LAYOUT_STACK_AREA_SIZE_BYTES (0xFFFFFF8000000000ULL-0xFFFFF00000000000ULL)
// KERNEL ALLOCATION SPACE
// The base for the kernel allocation space
#define SH_VMEM_LAYOUT_KERNEL_ALLOC_SPACE_VA 0xFFFF900000000000ULL
// The size for the kernel allocation space (base + size == VA_END below).
// Fix: expansion parenthesized so the macro is safe inside larger expressions.
#define SH_VMEM_LAYOUT_KERNEL_ALLOC_SPACE_SIZE_BYTES (0xFFFFEFFFFFFFFFFFULL-0xFFFF900000000000ULL)
// The end (last byte, inclusive) of the kernel allocation space
#define SH_VMEM_LAYOUT_KERNEL_ALLOC_SPACE_VA_END 0xFFFFEFFFFFFFFFFFULL
// SLABS VIRTUAL REGIONS
// The base for the slabs for physical region objects.
// Fix: expansions below parenthesized so the macros are safe inside larger expressions.
#define SH_VMEM_LAYOUT_SLAB_REG_PHYS_VA (0xFFFFFF8000000000ULL+0x4000ULL)
// The total size for the slabs for physical region objects (192 MiB)
#define SH_VMEM_LAYOUT_SLAB_REG_PHYS_SIZE_BYTES (192*1024*1024ULL)
// The base for the slabs for virtual region objects
#define SH_VMEM_LAYOUT_SLAB_REG_VIRT_VA 0xFFFFFF800C600000ULL
// The total size for the slabs for virtual region objects (2^29 objects x 12 bytes)
#define SH_VMEM_LAYOUT_SLAB_REG_VIRT_SIZE_BYTES ((1ULL<<29)*12ULL)
// The alignment for SH_VMEM_LAYOUT_SLAB_RADIX_NODE_VA
#define SH_VMEM_LAYOUT_PBA_RADIX_NODE_BLOCK_ALIGN (32ULL*4096ULL)
// The base for the PBA for the slabs for radix nodes: the end of the virtual region slabs,
// rounded up to the block alignment.
// NOTE(review): (x+align)/align*align rounds up by a full block even when x is already
// aligned — presumably an intentional guard gap; confirm, otherwise use (x+align-1).
#define SH_VMEM_LAYOUT_SLAB_RADIX_NODE_VA ((SH_VMEM_LAYOUT_SLAB_REG_VIRT_VA+SH_VMEM_LAYOUT_SLAB_REG_VIRT_SIZE_BYTES+SH_VMEM_LAYOUT_PBA_RADIX_NODE_BLOCK_ALIGN)/SH_VMEM_LAYOUT_PBA_RADIX_NODE_BLOCK_ALIGN)*SH_VMEM_LAYOUT_PBA_RADIX_NODE_BLOCK_ALIGN
// The total size for the PBA for the slabs for radix nodes (64 GiB)
#define SH_VMEM_LAYOUT_SLAB_RADIX_NODE_SIZE_BYTES (64ULL*1024ULL*1024ULL*1024ULL)
// LOGGING RING BUFFER
// The base for the logging ring buffer
#define SH_VMEM_LAYOUT_LOGGING_RING_BUFFER_VA 0xFFFFFFFFF0000000ULL
// The max size for the logging ring buffer (0xFFFF pages, just under 256 MiB).
// Fix: expansion parenthesized so the macro is safe inside larger expressions.
#define SH_VMEM_LAYOUT_LOGGING_RING_BUFFER_SIZE_BYTES (0xFFFF*4096)
#endif