// Physical memory allocator, for user processes,
// kernel stacks, page-table pages,
// and pipe buffers. Allocates whole 4096-byte pages.

#include "types.h"
#include "param.h"
#include "memlayout.h"
#include "spinlock.h"
#include "riscv.h"
#include "defs.h"

void freerange(void *pa_start, void *pa_end);
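
// COW refcount helpers, defined at the bottom of this file. Declared here so
// the file stands alone as a sketch; in an actual xv6 tree these prototypes
// would more likely live in defs.h.
void refcount_add(uint64 pa, int val);
int  refcount_get(uint64 pa);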

extern char end[]; // first address after kernel.
                   // defined by kernel.ld.
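
// [COW] One reference count per physical page between KERNBASE and PHYSTOP,
// indexed by (pa - KERNBASE) >> PGSHIFT. A page returns to the free list
// only when its count drops to zero.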
int refcount[(PHYSTOP - KERNBASE) >> PGSHIFT];

struct run {
  struct run *next;
};

struct {
  struct spinlock lock;
  struct run *freelist;
} kmem;

void
kinit()
{
  initlock(&kmem.lock, "kmem");
  memset(refcount, 0, sizeof(refcount));
  freerange(end, (void*)PHYSTOP);
}
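
// Debug bookkeeping: total_free is the number of pages handed to the
// allocator at boot; alloc_count counts successful kalloc() calls and
// free_count counts kfree() calls (including ones that only drop a
// reference).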
int total_free;
int alloc_count;
int free_count;
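
// Count how many physical pages currently have a nonzero reference count.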
int count_ref() {
  int count = 0;
  for (int i = 0; i < NELEM(refcount); ++i) {
    count += (refcount[i] > 0);
  }
  return count;
}
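
// Print the allocator's bookkeeping: pages with a nonzero refcount out of
// the total pages freed at boot, plus the running alloc/free counts.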
void show_page_count()
{
  int ref_count = count_ref();
  printf("count ref: %d / %d; alloc=%d, free=%d\n",
         ref_count, total_free, alloc_count, free_count);
}
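
// Hand every page in [pa_start, pa_end) to the allocator. Each page's
// refcount is first bumped to 1 so the kfree() below can decrement it back
// to zero without tripping the refcount sanity checks.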
void
freerange(void *pa_start, void *pa_end)
{
  char *p;
  total_free = alloc_count = free_count = 0;
  p = (char*)PGROUNDUP((uint64)pa_start);
  for(; p + PGSIZE <= (char*)pa_end; p += PGSIZE, total_free++){
    refcount_add((uint64)p, 1);
    kfree(p);
  }
  show_page_count();
}

// Free the page of physical memory pointed at by pa,
// which normally should have been returned by a
// call to kalloc().  (The exception is when
// initializing the allocator; see kinit above.)
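//
// [COW] With copy-on-write sharing, kfree() first drops one reference; the
// page goes back on the free list only when no references remain.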
void
kfree(void *pa)
{
  struct run *r;

  if(((uint64)pa % PGSIZE) != 0 || (char*)pa < end || (uint64)pa >= PHYSTOP)
    panic("kfree");

  // [COW] Drop one reference. The decrement and the zero check happen under
  // a single lock acquisition so that two CPUs freeing a shared page cannot
  // both observe the count reaching zero and double-free it.
  acquire(&kmem.lock);
  int idx = ((uint64)pa - KERNBASE) >> PGSHIFT;
  refcount[idx] -= 1;
  free_count++;
  if (refcount[idx] > 0) {
    // Still referenced elsewhere: keep the page.
    release(&kmem.lock);
    return;
  }
  if (refcount[idx] < 0)
    panic("kfree: invalid refcount");
  refcount[idx] = 0;
  release(&kmem.lock);

  // Fill with junk to catch dangling refs.
  memset(pa, 1, PGSIZE);

  r = (struct run*)pa;

  acquire(&kmem.lock);
  // Debug sanity checks; only meaningful once at least one kalloc() has run.
  if (alloc_count) {
    if (r == 0) panic("kfree r");
    if (kmem.freelist == 0) panic("kfree");
  }
  r->next = kmem.freelist;
  kmem.freelist = r;
  release(&kmem.lock);
}

// Allocate one 4096-byte page of physical memory.
// Returns a pointer that the kernel can use.
// Returns 0 if the memory cannot be allocated.
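//
// [COW] A freshly allocated page starts with a reference count of 1.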
void *
kalloc(void)
{
  struct run *r;

  acquire(&kmem.lock);
  r = kmem.freelist;
  if(r) {
    kmem.freelist = r->next;
    int idx = ((uint64)r - KERNBASE) >> PGSHIFT;
    if (idx < 0 || idx >= NELEM(refcount))
      panic("kalloc: refcount index out of range");
    refcount[idx] = 1;   // [COW] new page has exactly one owner
    alloc_count++;
  } else {
    // Out of memory: dump the bookkeeping to help diagnose leaks.
    show_page_count();
  }
  release(&kmem.lock);

  if(r)
    memset((char*)r, 5, PGSIZE); // fill with junk
  return (void*)r;
}
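
// [COW] Adjust the reference count of the physical page containing pa by
// val (positive when a mapping gains a sharer, negative when one is dropped).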
void
refcount_add(uint64 pa, int val)
{
  acquire(&kmem.lock);
  int idx = (pa - KERNBASE) >> PGSHIFT;
  if (idx < 0 || idx >= NELEM(refcount))
    panic("refcount_add: index out of range");
  refcount[idx] += val;
  release(&kmem.lock);
}
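
// [COW] Return the current reference count of the physical page containing
// pa. Note this reads the count without holding kmem.lock; callers that
// need a stable value must synchronize themselves.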
int
refcount_get(uint64 pa)
{
  int idx = (pa - KERNBASE) >> PGSHIFT;
  if (idx < 0 || idx >= NELEM(refcount))
    panic("refcount_get: index out of range");
  return refcount[idx];
}