Skip to content

Commit cc08dca

Browse files
committed
memory/kmalloc: allow allocating larger pages
It is now possible to allocate buffers larger than the maximum cache size using the kmalloc API. These buffers are allocated using the VM API directly. Their pages are flagged as PAGE_LARGE_ALLOC so that we can identify such buffers inside kfree().
1 parent 71ecdba commit cc08dca

3 files changed

Lines changed: 65 additions & 11 deletions

File tree

include/kernel/kmalloc.h

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@
5959
*/
6060
/** Feature flags accepted by the kmalloc() family of allocators. */
typedef enum kmalloc_flags {
    KMALLOC_KERNEL = 0, /* Default allocation flags. */
} kmalloc_flags_t;
6363

6464
#define KMALLOC_CACHE_MIN_SIZE 16
6565
#define KMALLOC_CACHE_MAX_SIZE 16384
@@ -97,6 +97,9 @@ static ALWAYS_INLINE __attribute__((const)) int kmalloc_cache_index(size_t size)
9797
*/
9898
void *kmalloc_from_cache(int cache_index, int flags);
9999

100+
/** Allocate a memory buffer too large to fit inside the default caches. */
101+
void *kmalloc_large(size_t size, int flags);
102+
100103
/*
101104
*
102105
*/
@@ -106,7 +109,7 @@ static ALWAYS_INLINE void *kmalloc(size_t size, int flags)
106109

107110
cache_index = kmalloc_cache_index(size);
108111
if (cache_index < 0)
109-
return NULL;
112+
return kmalloc_large(size, flags);
110113

111114
return kmalloc_from_cache(cache_index, flags);
112115
}
@@ -117,7 +120,7 @@ static ALWAYS_INLINE void *kmalloc(size_t size, int flags)
117120
*
118121
* @param nmemb The number of members to allocate
119122
* @param size The size of each member
120-
* @param flags Feature flags, must be a combination of @ref kmalloc_flags
123+
* @param flags Feature flags, must be a combination of @ref kmalloc_flags_t
121124
*
122125
* @return The starting address of the newly allocated area
123126
*/

include/kernel/pmm.h

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -61,9 +61,10 @@
6161
* @enum page_flags
6262
*/
6363
enum page_flags {
64-
PAGE_AVAILABLE = BIT(0), ///< This page has not been allocated
65-
PAGE_COW = BIT(1), ///< Currently used in a CoW mapping
66-
PAGE_SLAB = BIT(2), ///< Page allocated by the slab allocator
64+
PAGE_AVAILABLE = BIT(0), ///< This page has not been allocated
65+
PAGE_COW = BIT(1), ///< Currently used in a CoW mapping
66+
PAGE_SLAB = BIT(2), ///< Page allocated by the slab allocator
67+
PAGE_LARGE_ALLOC = BIT(3), ///< Page allocated by kmalloc_large()
6768
};
6869

6970
/** Represents a physical pageframe

kernel/memory/kmalloc.c

Lines changed: 55 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -18,23 +18,58 @@ static const char *kmalloc_cache_names[] = {
1818
"size-4096", "size-8192", "size-16384",
1919
};
2020

21+
/*
22+
*
23+
*/
24+
static inline vm_flags_t kmalloc_flags_to_vm_flags(kmalloc_flags_t flags)
25+
{
26+
vm_flags_t vm_flags = VM_KERNEL_RW;
27+
28+
UNUSED(flags);
29+
30+
return vm_flags;
31+
}
32+
2133
void *kmalloc_from_cache(int cache_index, int flags)
2234
{
2335
return kmem_cache_alloc(kmalloc_size_caches[cache_index], flags);
2436
}
2537

26-
void *kcalloc(size_t nmemb, size_t size, int flags)
38+
/** Allocate a memory buffer too large to fit inside the default caches. */
39+
void *kmalloc_large(size_t size, int flags)
2740
{
28-
if (__builtin_mul_overflow(nmemb, size, &size))
41+
struct vm_segment *vm_segment;
42+
paddr_t paddr;
43+
44+
size = align_up(size, PAGE_SIZE);
45+
flags = kmalloc_flags_to_vm_flags(flags);
46+
47+
if (WARN_ON_MSG(size <= KMALLOC_CACHE_MAX_SIZE, "use kmalloc_from_cache()"))
2948
return NULL;
3049

31-
void *ptr = kmalloc(size, flags);
32-
if (ptr == NULL)
50+
vm_segment = vm_alloc(&kernel_address_space, size, flags);
51+
if (!vm_segment)
3352
return NULL;
3453

35-
return memset(ptr, 0, size);
54+
/* Mark all pages so we know how it was allocated when freeing. */
55+
paddr = mmu_find_physical(vm_segment->start);
56+
for (size_t off = 0; off < size; off += PAGE_SIZE)
57+
address_to_page(paddr)->flags |= PAGE_LARGE_ALLOC;
58+
59+
return (void *)vm_segment->start;
3660
}
3761

62+
/*
63+
* Free a memory buffer that was allocated using kmalloc_large().
64+
*/
65+
static inline void kfree_large_alloc(void *ptr)
66+
{
67+
vm_free(&kernel_address_space, PAGE_ALIGN_DOWN(ptr));
68+
}
69+
70+
/*
71+
*
72+
*/
3873
void kfree(void *ptr)
3974
{
4075
struct page *page;
@@ -46,9 +81,24 @@ void kfree(void *ptr)
4681
paddr = mmu_find_physical((vaddr_t)ptr);
4782
page = address_to_page(paddr);
4883

84+
if (page->flags & PAGE_LARGE_ALLOC)
85+
return kfree_large_alloc(ptr);
86+
4987
kmem_cache_free(page->slab.cache, ptr);
5088
}
5189

90+
/**
 * Allocate a zero-initialized array of @nmemb elements of @size bytes each.
 *
 * The total byte count is computed with overflow checking; an overflowing
 * request fails cleanly instead of silently allocating a short buffer.
 *
 * @param nmemb The number of members to allocate
 * @param size  The size of each member
 * @param flags Feature flags, must be a combination of @ref kmalloc_flags_t
 *
 * @return The starting address of the newly allocated, zeroed area,
 *         or NULL on overflow or allocation failure.
 */
void *kcalloc(size_t nmemb, size_t size, int flags)
{
    size_t total;

    if (__builtin_mul_overflow(nmemb, size, &total))
        return NULL;

    void *buffer = kmalloc(total, flags);
    if (buffer == NULL)
        return NULL;

    return memset(buffer, 0, total);
}
101+
52102
void *krealloc(void *ptr, size_t size, int flags)
53103
{
54104
struct page *page;

0 commit comments

Comments
 (0)