First commit, Vystem v0.1
This commit is contained in:
60
shelter/lib/include/memory/slabs/slab_generic.h
Normal file
60
shelter/lib/include/memory/slabs/slab_generic.h
Normal file
@@ -0,0 +1,60 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
// This is the implementation for the slab allocator for generic sizes
|
||||
#ifndef SH_LIB_SLAB_GENERIC_H
|
||||
#define SH_LIB_SLAB_GENERIC_H
|
||||
#include "memory/page.h"
|
||||
#include "std/status.h"
|
||||
#include "std/type.h"
|
||||
#include "memory/pba.h"
|
||||
#define SH_SLAB_GENERIC_SLAB_DATA_PAGES {1,2,4,8,16,32,64,128}
|
||||
#define SH_SLAB_GENERIC_OBJECT_SIZE_BYTES {8,16,32,64,128,256,512,1024}
|
||||
#define SH_SLAB_GENERIC_OBJECTS_PER_SLAB 512
|
||||
#define SH_SLAB_GENERIC_ACTUAL_OBJECTS_PER_SLAB {496,504,508,510,511,511,511,511}
|
||||
#define SH_SLAB_GENERIC_SLAB_BITMAP_INIT {0xFFFF,0xFF,0xF,0x3,1,1,1,1}
|
||||
#define SH_SLAB_GENERIC_SLAB_BITMAP_SIZE_BYTES 64
|
||||
#define SH_SLAB_GENERIC_NULL_REF (sh_uint64)0
|
||||
#define SH_SLAB_GENERIC_SLAB_SIG {'S','h','S','l','G','e','n','e'}
|
||||
#define SH_SLAB_GENERIC_MAGIC 0x656E65476C536853ULL // little endian
|
||||
// Generic slab structure
|
||||
#pragma pack(1)
// On-slab header for a generic-size slab. It is overlaid on the start of the
// slab's data pages, so it consumes the first object slots (how many depends
// on the object size for this level; see SH_SLAB_GENERIC_ACTUAL_OBJECTS_PER_SLAB
// and SH_SLAB_GENERIC_SLAB_BITMAP_INIT). pack(1) keeps the layout exact:
// 8+2+8+4*8+14 = 64 bytes of fields, then a 64-byte bitmap = 128 bytes total.
typedef struct sh_slab_generic_SLAB {
    sh_uint8 sig[8];                           // slab signature, SH_SLAB_GENERIC_SLAB_SIG ("ShSlGene")
    sh_uint16 used_count;                      // number of object slots currently allocated in this slab
    sh_uint64 slab_index;                      // index of this slab within its allocator
    struct sh_slab_generic_SLAB* next_slab;    // doubly linked list of all slabs of the allocator
    struct sh_slab_generic_SLAB* prev_slab;
    struct sh_slab_generic_SLAB* next_partial; // doubly linked list of slabs that still have free slots
    struct sh_slab_generic_SLAB* prev_partial;
    sh_uint8 padding[14];                      // pads the fields above to exactly 64 bytes
    sh_uint64 free_bitmap[SH_SLAB_GENERIC_SLAB_BITMAP_SIZE_BYTES/8]; // 512 bits, one per object slot. First objects will be mark as unavailable due to being replaced by the header and bitmap, their amount depend on the object size
} sh_slab_generic_SLAB;
#pragma pack()
|
||||
// Generic slab allocator structure
|
||||
// Generic slab allocator state for one object size class ("level").
// Holds no slab memory itself; slab pages come from the PBA.
struct sh_slab_generic_SLAB_ALLOCATOR {
    sh_slab_generic_SLAB* first_slab;     // head of the list of all slabs (NULL before the first slab is added)
    sh_slab_generic_SLAB* partial_head;   // head of the list of slabs with free slots
    sh_uint64 level;                      // size-class index, presumably into the SH_SLAB_GENERIC_* tables above — confirm in sh_slab_generic_alloc_init
    sh_uint64 slab_count;                 // number of slabs currently owned by this allocator
    sh_uint64 object_size_bytes;          // object size for this level
    sh_uint64 object_per_slab;            // nominal slots per slab (SH_SLAB_GENERIC_OBJECTS_PER_SLAB)
    sh_uint64 actual_object_per_slab;     // usable slots after the header/bitmap overlay
    sh_uint64 bitmap_init;                // initial value of the first free_bitmap word (header slots pre-marked)
    sh_uint64 slab_pages_count;           // data pages per slab for this level
    sh_pba_PAGE_BLOCK_ALLOCATOR* pba;     // page source used when adding slabs
};
|
||||
typedef sh_uint16 sh_slab_generic_OBJECT_INDEX_IN_SLAB;
|
||||
// Initialize slab allocator structure. Does not allocate any slab
|
||||
SH_STATUS sh_slab_generic_alloc_init(sh_uint8 level,struct sh_slab_generic_SLAB_ALLOCATOR* slab_alloc,sh_pba_PAGE_BLOCK_ALLOCATOR *pba);
|
||||
// Allocate a new slab, initialize it and put it into the allocator. If new slab isn't the first to be allocated, push it on the partial slab list
|
||||
SH_STATUS sh_slab_generic_add_slab(struct sh_slab_generic_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL *ptp,sh_slab_generic_SLAB** out_slab);
|
||||
// Obtain a pointer to the first partial slab. Does not scan the slabs. Allocate a new slab if necessary
|
||||
SH_STATUS sh_slab_generic_get_partial_slab(struct sh_slab_generic_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL* ptp,sh_slab_generic_SLAB** found_slab);
|
||||
// Rescan all the slabs to rebuild the partial list in case of doubt. Does not modify alloc->slab_count, any slab->free_bitmap or any slab's object data
|
||||
SH_STATUS sh_slab_generic_scan_slabs(struct sh_slab_generic_SLAB_ALLOCATOR* alloc);
|
||||
// Return a valid pointer to an empty object slot as well as the object index in the corresponding slab. Slab allocation is automated by sh_slab_generic_get_partial_slab
|
||||
SH_STATUS sh_slab_generic_find_free_object(struct sh_slab_generic_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL *ptp,void** out,sh_slab_generic_OBJECT_INDEX_IN_SLAB* index_in_slab);
|
||||
// Allocate one new object. Return a pointer to the struct of the new object. Since it calls sh_slab_generic_find_free_object, it can allocate a new slab
|
||||
SH_STATUS sh_slab_generic_alloc(struct sh_slab_generic_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL *ptp,void** out_obj);
|
||||
// Deallocate one generic object provided as pointer.
|
||||
SH_STATUS sh_slab_generic_dealloc(struct sh_slab_generic_SLAB_ALLOCATOR* alloc,void *object_ptr);
|
||||
#endif
|
||||
59
shelter/lib/include/memory/slabs/slab_radix_node.h
Normal file
59
shelter/lib/include/memory/slabs/slab_radix_node.h
Normal file
@@ -0,0 +1,59 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
// This is the implementation for the slab allocator for sh_radix_NODE
|
||||
#ifndef SH_LIB_SLAB_RADIX_NODE_H
|
||||
#define SH_LIB_SLAB_RADIX_NODE_H
|
||||
#include "memory/page.h"
|
||||
#include "std/status.h"
|
||||
#include "std/type.h"
|
||||
#include "memory/pez/radix.h"
|
||||
#include "memory/pba.h"
|
||||
#define SH_SLAB_RADIX_NODE_SLAB_DATA_PAGES (sh_uint64)32
|
||||
#define SH_SLAB_RADIX_NODE_SLAB_DATA_SIZE_BYTES (sh_uint64)(SH_SLAB_RADIX_NODE_SLAB_DATA_PAGES*4096)
|
||||
#define SH_SLAB_RADIX_NODE_OBJECT_SIZE_BYTES (sh_uint64)128
|
||||
#define SH_SLAB_RADIX_NODE_OBJECTS_PER_SLAB (sh_uint64)1024
|
||||
#define SH_SLAB_RADIX_NODE_ACTUAL_OBJECTS_PER_SLAB (sh_uint64)1006
|
||||
#define SH_SLAB_RADIX_NODE_SLAB_BITMAP_SIZE_BYTES (sh_uint64)(SH_SLAB_RADIX_NODE_OBJECTS_PER_SLAB/8)
|
||||
#define SH_SLAB_RADIX_NODE_NULL_REF (sh_uint64)0
|
||||
#define SH_SLAB_RADIX_NODE_SLAB_SIG {'S','h','S','l','R','a','N','o'}
|
||||
#define SH_SLAB_RADIX_NODE_MAGIC 0x6F4E61526C536853ULL // little endian
|
||||
// Radix node slab structure
|
||||
#pragma pack(1)
// On-slab header for a radix-node slab, overlaid on the start of the slab's
// 32 data pages. pack(1) keeps the layout exact: 8+2+8+4*8+78 = 128 bytes of
// header (one object slot), +128 bytes free_bitmap, +2048 bytes node_bitmap
// = 2304 bytes = 18 object slots, which is why only 1006 of 1024 slots are
// usable (SH_SLAB_RADIX_NODE_ACTUAL_OBJECTS_PER_SLAB).
typedef struct sh_slab_radix_node_SLAB {
    sh_uint8 sig[8];                              // slab signature, SH_SLAB_RADIX_NODE_SLAB_SIG ("ShSlRaNo")
    sh_uint16 used_count;                         // number of nodes currently allocated in this slab
    sh_uint64 slab_index;                         // index of this slab within its allocator
    struct sh_slab_radix_node_SLAB* next_slab;    // doubly linked list of all slabs of the allocator
    struct sh_slab_radix_node_SLAB* prev_slab;
    struct sh_slab_radix_node_SLAB* next_partial; // doubly linked list of slabs that still have free slots
    struct sh_slab_radix_node_SLAB* prev_partial;
    sh_uint8 padding[78];                         // pads the fields above to exactly 128 bytes (one slot)
    sh_uint64 free_bitmap[16];                    // 1024 bits, one per slot. First 18 objects will be mark as unavailable due to being replaced by the header and bitmap
    sh_uint16 node_bitmap[1024];                  // one 16-bit bitmap per slot (see sh_slab_radix_node_get_node_bitmap)
    sh_radix_NODE nodes[SH_SLAB_RADIX_NODE_ACTUAL_OBJECTS_PER_SLAB]; // 1006 * 128 bytes; total struct = 32 pages
} sh_slab_radix_node_SLAB;
#pragma pack()
|
||||
// Radix node slab allocator structure
|
||||
// Radix-node slab allocator state. Holds no slab memory itself;
// slab pages come from the PBA.
struct sh_slab_radix_node_SLAB_ALLOCATOR {
    sh_slab_radix_node_SLAB* first_slab;   // head of the list of all slabs (NULL before the first slab is added)
    sh_slab_radix_node_SLAB* partial_head; // head of the list of slabs with free slots
    sh_uint64 slab_count;                  // number of slabs currently owned by this allocator
    sh_pba_PAGE_BLOCK_ALLOCATOR* pba;      // page source used by sh_slab_radix_node_add_slab
};
|
||||
typedef sh_uint16 sh_slab_radix_node_NODE_INDEX_IN_SLAB;
|
||||
// Initialize slab allocator structure. Does not allocate any slab
|
||||
SH_STATUS sh_slab_radix_node_alloc_init(struct sh_slab_radix_node_SLAB_ALLOCATOR* slab_alloc,sh_pba_PAGE_BLOCK_ALLOCATOR *pba);
|
||||
// Allocate a new slab, initialize it and put it into the allocator. If new slab isn't the first to be allocated, push it on the partial slab list
|
||||
SH_STATUS sh_slab_radix_node_add_slab(struct sh_slab_radix_node_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL *ptp,sh_slab_radix_node_SLAB** out_slab);
|
||||
// Obtain a pointer to the first partial slab. Does not scan the slabs. Allocate a new slab if necessary
|
||||
SH_STATUS sh_slab_radix_node_get_partial_slab(struct sh_slab_radix_node_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL* ptp,sh_slab_radix_node_SLAB** found_slab);
|
||||
// Rescan all the slabs to set all metadatas in case of doubt. Does not modify alloc->slab_count or <any slab>->nodes
|
||||
SH_STATUS sh_slab_radix_node_scan_slabs(struct sh_slab_radix_node_SLAB_ALLOCATOR* alloc);
|
||||
// Return a valid pointer to an empty object slot as well as the object index in the corresponding slab. Slab allocation is automated by sh_slab_radix_node_get_partial_slab
|
||||
SH_STATUS sh_slab_radix_node_find_free_object(struct sh_slab_radix_node_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL *ptp,sh_radix_NODE** out,sh_slab_radix_node_NODE_INDEX_IN_SLAB* index_in_slab);
|
||||
// Allocate one new radix node object. Return a pointer to the struct of the new object. Since it calls sh_slab_radix_node_find_free_object, it can allocate a new slab
|
||||
SH_STATUS sh_slab_radix_node_alloc(struct sh_slab_radix_node_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL *ptp,sh_radix_NODE** out_obj);
|
||||
// Deallocate one radix node object provided as pointer.
|
||||
SH_STATUS sh_slab_radix_node_dealloc(struct sh_slab_radix_node_SLAB_ALLOCATOR* alloc,sh_radix_NODE *object_ptr);
|
||||
// Return a pointer to the bitmap of a node
|
||||
sh_uint16 *sh_slab_radix_node_get_node_bitmap(struct sh_slab_radix_node_SLAB_ALLOCATOR* alloc,sh_radix_NODE *object_ptr);
|
||||
#endif
|
||||
58
shelter/lib/include/memory/slabs/slab_reg_phys.h
Normal file
58
shelter/lib/include/memory/slabs/slab_reg_phys.h
Normal file
@@ -0,0 +1,58 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
// This is the implementation for the slab allocator for sh_pez_REGION_PHYSICAL_OBJECT
|
||||
#ifndef SH_LIB_SLAB_REG_PHYS_H
|
||||
#define SH_LIB_SLAB_REG_PHYS_H
|
||||
#include "std/type.h"
|
||||
#include "std/status.h"
|
||||
#include "memory/page.h"
|
||||
#include "memory/vmem_layout.h"
|
||||
#define SH_SLAB_REG_PHYS_DATA_VA SH_VMEM_LAYOUT_SLAB_REG_PHYS_VA
|
||||
#define SH_SLAB_REG_PHYS_MAX_SLAB (sh_uint64)(16*1024)
|
||||
#define SH_SLAB_REG_PHYS_SLAB_DATA_PAGES (sh_uint64)3
|
||||
#define SH_SLAB_REG_PHYS_SLAB_DATA_SIZE_BYTES (sh_uint64)(SH_SLAB_REG_PHYS_SLAB_DATA_PAGES*4096)
|
||||
#define SH_SLAB_REG_PHYS_OBJECT_SIZE_BYTES (sh_uint64)12
|
||||
#define SH_SLAB_REG_PHYS_OBJECTS_PER_SLAB (sh_uint64)1024
|
||||
#define SH_SLAB_REG_PHYS_ACTUAL_OBJECTS_PER_SLAB (sh_uint64)1023
|
||||
#define SH_SLAB_REG_PHYS_SLAB_BITMAP_SIZE_BYTES (sh_uint64)(SH_SLAB_REG_PHYS_OBJECTS_PER_SLAB/8)
|
||||
#define SH_SLAB_REG_PHYS_NULL_REF (sh_uint64)0
|
||||
// Physical region slab structure
|
||||
// Out-of-band header for one physical-region slab. Unlike the generic and
// radix-node slabs, headers live in the allocator's slabs_header array,
// separate from the object data (slabs_data); no pack pragma is needed.
typedef struct sh_slab_reg_phys_SLAB_STRUCT {
    sh_uint16 used_count;                      // number of object slots currently allocated in this slab
    sh_uint16 slab_index;                      // index of this slab; fits in 16 bits since MAX_SLAB is 16*1024
    struct sh_slab_reg_phys_SLAB_STRUCT* next; // list links — presumably the partial-slab list reached via partial_head; confirm in the .c
    struct sh_slab_reg_phys_SLAB_STRUCT* prev;
    sh_uint64 free_bitmap[16];                 // 1024 bits, one per object slot
} sh_slab_reg_phys_SLAB_STRUCT;
|
||||
// Total size in bytes of the out-of-band slab header array (MAX_SLAB headers).
// Expansion is fully parenthesized so the macro composes safely inside larger
// expressions (e.g. `x / SH_SLAB_REG_PHYS_HEADER_LIST_SIZE_BYTES` would
// otherwise parse as `(x / MAX_SLAB) * sizeof(...)`).
#define SH_SLAB_REG_PHYS_HEADER_LIST_SIZE_BYTES (SH_SLAB_REG_PHYS_MAX_SLAB*sizeof(sh_slab_reg_phys_SLAB_STRUCT))
|
||||
// Physical region object slab allocator structure
|
||||
// Physical-region object slab allocator. Slab headers and slab data are kept
// in two separate flat regions, both indexed by slab number.
typedef struct {
    sh_slab_reg_phys_SLAB_STRUCT* slabs_header; // array of slab headers, one per possible slab
    sh_uint8* slabs_data;                       // base of the slab data area — presumably SH_SLAB_REG_PHYS_DATA_VA; confirm in alloc_init
    sh_uint16 max_slabs;                        // capacity; SH_SLAB_REG_PHYS_MAX_SLAB (16384) fits in 16 bits
    sh_uint16 slab_count;                       // number of slabs currently in use
    sh_slab_reg_phys_SLAB_STRUCT* partial_head; // head of the list of slabs with free slots
} sh_slab_reg_phys_SLAB_ALLOCATOR;
|
||||
typedef sh_uint32 sh_slab_reg_phys_OBJECT_INDEX;
|
||||
// Return slab index
|
||||
#define SH_SLAB_REG_PHYS_REF_SLAB(ref) (sh_uint16)((ref)>>10)
|
||||
// Return object index inside a slab
|
||||
#define SH_SLAB_REG_PHYS_REF_OBJECT(ref) (sh_uint16)((ref) & 0x3FF)
|
||||
// Make a valid object index from a slab index and an object index inside this slab
|
||||
#define SH_SLAB_REG_PHYS_MAKE_REF(slab,obj) (((sh_slab_reg_phys_OBJECT_INDEX)(slab)<<10) | (sh_slab_reg_phys_OBJECT_INDEX)(obj))
|
||||
// Initialize slab allocator structure. Does not allocate any slabs
|
||||
SH_STATUS sh_slab_reg_phys_alloc_init(sh_slab_reg_phys_SLAB_ALLOCATOR* slab_alloc,sh_page_PAGE_TABLE_POOL *ptp);
|
||||
// Allocate a new slab, initialize it and put it into the allocator. If new slab isn't the first to be allocated, push it on the partial slab list
|
||||
SH_STATUS sh_slab_reg_phys_add_slab(sh_slab_reg_phys_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL *ptp,sh_slab_reg_phys_SLAB_STRUCT** out_slab);
|
||||
// Obtain a pointer to the first partial slab. Does not scan the slabs. Allocate a new slab if necessary
|
||||
SH_STATUS sh_slab_reg_phys_get_partial_slab(sh_slab_reg_phys_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL* ptp,sh_slab_reg_phys_SLAB_STRUCT** found_slab);
|
||||
// Rescan all the slabs to set all metadatas in case of doubt. Does not modify alloc->slab_count, alloc->slabs_data
|
||||
SH_STATUS sh_slab_reg_phys_scan_slabs(sh_slab_reg_phys_SLAB_ALLOCATOR* alloc);
|
||||
// Return a valid 24 bits index to an empty object slot. Slab allocation is automated by sh_slab_reg_phys_get_partial_slab
|
||||
SH_STATUS sh_slab_reg_phys_find_free_object(sh_slab_reg_phys_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL *ptp,sh_slab_reg_phys_OBJECT_INDEX* out_ref);
|
||||
// Return a pointer to referenced physical region object
|
||||
void* sh_slab_reg_phys_ref_to_ptr(sh_slab_reg_phys_SLAB_ALLOCATOR* alloc,sh_slab_reg_phys_OBJECT_INDEX ref);
|
||||
// Allocate one new physical region object. Return an index to access and deallocate the object. Since it calls sh_slab_reg_phys_find_free_object, it can allocate a new slab
|
||||
SH_STATUS sh_slab_reg_phys_alloc(sh_slab_reg_phys_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL *ptp,sh_slab_reg_phys_OBJECT_INDEX* out_index);
|
||||
// Deallocate one physical region object provided as index.
|
||||
SH_STATUS sh_slab_reg_phys_dealloc(sh_slab_reg_phys_SLAB_ALLOCATOR* alloc,sh_slab_reg_phys_OBJECT_INDEX index);
|
||||
#endif
|
||||
58
shelter/lib/include/memory/slabs/slab_reg_virt.h
Normal file
58
shelter/lib/include/memory/slabs/slab_reg_virt.h
Normal file
@@ -0,0 +1,58 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
// This is the implementation for the slab allocator for sh_pez_REGION_VIRTUAL_OBJECT
|
||||
#ifndef SH_LIB_SLAB_REG_VIRT_H
|
||||
#define SH_LIB_SLAB_REG_VIRT_H
|
||||
#include "std/type.h"
|
||||
#include "std/status.h"
|
||||
#include "memory/page.h"
|
||||
#include "memory/vmem_layout.h"
|
||||
#define SH_SLAB_REG_VIRT_DATA_VA SH_VMEM_LAYOUT_SLAB_REG_VIRT_VA
|
||||
#define SH_SLAB_REG_VIRT_MAX_SLAB (sh_uint64)(512*1024)
|
||||
#define SH_SLAB_REG_VIRT_SLAB_DATA_PAGES (sh_uint64)3
|
||||
#define SH_SLAB_REG_VIRT_SLAB_DATA_SIZE_BYTES (sh_uint64)(SH_SLAB_REG_VIRT_SLAB_DATA_PAGES*4096)
|
||||
#define SH_SLAB_REG_VIRT_OBJECT_SIZE_BYTES (sh_uint64)12
|
||||
#define SH_SLAB_REG_VIRT_OBJECTS_PER_SLAB (sh_uint64)1024
|
||||
#define SH_SLAB_REG_VIRT_ACTUAL_OBJECTS_PER_SLAB (sh_uint64)1023
|
||||
#define SH_SLAB_REG_VIRT_SLAB_BITMAP_SIZE_BYTES (sh_uint64)(SH_SLAB_REG_VIRT_OBJECTS_PER_SLAB/8)
|
||||
#define SH_SLAB_REG_VIRT_NULL_REF (sh_uint64)0
|
||||
// Virtual region slab structure
|
||||
// Out-of-band header for one virtual-region slab. Headers live in the
// allocator's slabs_header array, separate from the object data (slabs_data).
// Note slab_index is 32-bit here (MAX_SLAB is 512*1024), unlike the 16-bit
// index of the physical-region variant.
typedef struct sh_slab_reg_virt_SLAB_STRUCT {
    sh_uint16 used_count;                      // number of object slots currently allocated in this slab
    sh_uint32 slab_index;                      // index of this slab (up to 512*1024)
    struct sh_slab_reg_virt_SLAB_STRUCT* next; // list links — presumably the partial-slab list reached via partial_head; confirm in the .c
    struct sh_slab_reg_virt_SLAB_STRUCT* prev;
    sh_uint64 free_bitmap[16];                 // 1024 bits, one per object slot
} sh_slab_reg_virt_SLAB_STRUCT;
|
||||
// Total size in bytes of the out-of-band slab header array (MAX_SLAB headers).
// Expansion is fully parenthesized so the macro composes safely inside larger
// expressions (e.g. `x / SH_SLAB_REG_VIRT_HEADER_LIST_SIZE_BYTES` would
// otherwise parse as `(x / MAX_SLAB) * sizeof(...)`).
#define SH_SLAB_REG_VIRT_HEADER_LIST_SIZE_BYTES (SH_SLAB_REG_VIRT_MAX_SLAB*sizeof(sh_slab_reg_virt_SLAB_STRUCT))
|
||||
// Virtual region object slab allocator structure
|
||||
// Virtual-region object slab allocator. Slab headers and slab data are kept
// in two separate flat regions, both indexed by slab number.
typedef struct {
    sh_slab_reg_virt_SLAB_STRUCT* slabs_header; // array of slab headers, one per possible slab
    sh_uint8* slabs_data;                       // base of the slab data area — presumably SH_SLAB_REG_VIRT_DATA_VA; confirm in alloc_init
    sh_uint32 max_slabs;                        // capacity; SH_SLAB_REG_VIRT_MAX_SLAB (512*1024) needs 32 bits
    sh_uint32 slab_count;                       // number of slabs currently in use
    sh_slab_reg_virt_SLAB_STRUCT* partial_head; // head of the list of slabs with free slots
} sh_slab_reg_virt_SLAB_ALLOCATOR;
|
||||
typedef sh_uint32 sh_slab_reg_virt_OBJECT_INDEX;
|
||||
// Return slab index
|
||||
#define SH_SLAB_REG_VIRT_REF_SLAB(ref) (sh_uint32)((ref)>>10)
|
||||
// Return object index inside a slab
|
||||
#define SH_SLAB_REG_VIRT_REF_OBJECT(ref) (sh_uint16)((ref) & 0x3FF)
|
||||
// Make a valid object index from a slab index and an object index inside this slab
|
||||
#define SH_SLAB_REG_VIRT_MAKE_REF(slab,obj) (((sh_slab_reg_virt_OBJECT_INDEX)(slab)<<10) | (sh_slab_reg_virt_OBJECT_INDEX)(obj))
|
||||
// Initialize slab allocator structure. Does not allocate any slabs
|
||||
SH_STATUS sh_slab_reg_virt_alloc_init(sh_slab_reg_virt_SLAB_ALLOCATOR* slab_alloc,sh_page_PAGE_TABLE_POOL *ptp);
|
||||
// Allocate a new slab, initialize it and put it into the allocator. If new slab isn't the first to be allocated, push it on the partial slab list
|
||||
SH_STATUS sh_slab_reg_virt_add_slab(sh_slab_reg_virt_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL *ptp,sh_slab_reg_virt_SLAB_STRUCT** out_slab);
|
||||
// Obtain a pointer to the first partial slab. Does not scan the slabs. Allocate a new slab if necessary
|
||||
SH_STATUS sh_slab_reg_virt_get_partial_slab(sh_slab_reg_virt_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL* ptp,sh_slab_reg_virt_SLAB_STRUCT** found_slab);
|
||||
// Rescan all the slabs to set all metadatas in case of doubt. Does not modify alloc->slab_count, alloc->slabs_data
|
||||
SH_STATUS sh_slab_reg_virt_scan_slabs(sh_slab_reg_virt_SLAB_ALLOCATOR* alloc);
|
||||
// Return a valid 29 bits index to an empty object slot. Slab allocation is automated by sh_slab_reg_virt_get_partial_slab
|
||||
SH_STATUS sh_slab_reg_virt_find_free_object(sh_slab_reg_virt_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL *ptp,sh_slab_reg_virt_OBJECT_INDEX* out_ref);
|
||||
// Return a pointer to referenced virtual region object
|
||||
void* sh_slab_reg_virt_ref_to_ptr(sh_slab_reg_virt_SLAB_ALLOCATOR* alloc,sh_slab_reg_virt_OBJECT_INDEX ref);
|
||||
// Allocate one new virtual region object. Return an index to access and deallocate the object (note: only out_index is returned; no struct pointer, unlike this comment's earlier claim). Since it calls sh_slab_reg_virt_find_free_object, it can allocate a new slab
|
||||
SH_STATUS sh_slab_reg_virt_alloc(sh_slab_reg_virt_SLAB_ALLOCATOR* alloc,sh_page_PAGE_TABLE_POOL *ptp,sh_slab_reg_virt_OBJECT_INDEX* out_index);
|
||||
// Deallocate one virtual region object provided as index.
|
||||
SH_STATUS sh_slab_reg_virt_dealloc(sh_slab_reg_virt_SLAB_ALLOCATOR* alloc,sh_slab_reg_virt_OBJECT_INDEX index);
|
||||
#endif
|
||||
Reference in New Issue
Block a user