vm: address-space: implement address space cleanup

This commit is contained in:
2026-03-18 21:07:27 +00:00
parent 24f9ef85bf
commit e03b2e07d0
2 changed files with 109 additions and 18 deletions

View File

@@ -5,7 +5,7 @@
#include <kernel/pmap.h>
#include <kernel/vm.h>
#define ADDRESS_SPACE_COPY_ALL ((size_t) - 1)
#define ADDRESS_SPACE_COPY_ALL ((size_t)-1)
#define ADDRESS_SPACE_F_
struct address_space;

View File

@@ -34,6 +34,7 @@ struct vm_iterator {
/* iterates over the areas in an address space */
struct area_iterator {
struct address_space *it_root;
struct btree *it_list;
struct vm_area *it_area;
virt_addr_t it_search_base, it_search_limit;
virt_addr_t it_base, it_limit;
@@ -44,12 +45,15 @@ enum search_direction {
SEARCH_RIGHT,
};
static kern_status_t address_space_object_destroy(struct object *obj);
static kern_status_t address_space_cleanup(struct object *obj, struct queue *q);
/* Object-type descriptor registered with the object system for address
 * spaces: name, allocation size, offset of the embedded object header,
 * and the destroy hook that tears down all mappings when the last
 * reference is dropped. */
static struct object_type address_space_type = {
.ob_name = "address-space",
.ob_size = sizeof(struct address_space),
.ob_header_offset = offsetof(struct address_space, s_base),
.ob_ops = {
.destroy = address_space_cleanup,
},
};
static struct vm_cache vm_area_cache = {
@@ -59,6 +63,10 @@ static struct vm_cache vm_area_cache = {
/*** INTERNAL UTILITY FUNCTION ************************************************/
static kern_status_t delete_area(
struct vm_area *mapping,
struct address_space *root);
/* this function must be called with `parent` locked */
static void put_entry(struct btree *tree, struct vm_area *child)
{
@@ -105,16 +113,16 @@ static void put_entry(struct btree *tree, struct vm_area *child)
}
static struct vm_area *get_entry(
struct address_space *region,
struct btree *list,
virt_addr_t address,
enum get_entry_flags flags)
{
/* `x` must be to the left of `y` */
#define LEFT_DIFF(x, y) ((y) ? ((y)->vma_base - (x)) : ((size_t) - 1))
#define LEFT_DIFF(x, y) ((y) ? ((y)->vma_base - (x)) : ((size_t)-1))
/* `x` must be to the right of `y` */
#define RIGHT_DIFF(x, y) ((y) ? ((y)->vma_limit - (x)) : ((size_t) - 1))
#define RIGHT_DIFF(x, y) ((y) ? ((y)->vma_limit - (x)) : ((size_t)-1))
struct btree_node *cur = region->s_mappings.b_root;
struct btree_node *cur = list->b_root;
if (!cur) {
return NULL;
}
@@ -345,7 +353,7 @@ static void vm_iterator_begin(
it->it_region = region;
it->it_prot = prot;
it->it_mapping = get_entry(region, base, GET_ENTRY_EXACT);
it->it_mapping = get_entry(&region->s_mappings, base, GET_ENTRY_EXACT);
if (!it->it_mapping) {
return;
}
@@ -409,8 +417,10 @@ static kern_status_t vm_iterator_seek(struct vm_iterator *it, size_t nr_bytes)
it->it_base += nr_bytes;
struct vm_area *next_mapping
= get_entry(it->it_region, it->it_base, GET_ENTRY_EXACT);
struct vm_area *next_mapping = get_entry(
&it->it_region->s_mappings,
it->it_base,
GET_ENTRY_EXACT);
if (!next_mapping) {
it->it_buf = NULL;
it->it_max = 0;
@@ -478,12 +488,14 @@ static void vm_iterator_finish(struct vm_iterator *it)
static void area_iterator_begin(
struct area_iterator *it,
struct address_space *space,
struct btree *area_list,
virt_addr_t base,
virt_addr_t limit)
{
memset(it, 0x0, sizeof *it);
struct vm_area *area = get_entry(space, base, GET_ENTRY_CLOSEST_RIGHT);
struct vm_area *area
= get_entry(area_list, base, GET_ENTRY_CLOSEST_RIGHT);
if (!area) {
return;
}
@@ -552,8 +564,46 @@ end:
return KERN_NO_ENTRY;
}
static void area_iterator_erase(struct area_iterator *it)
/*
 * Erase the area the iterator currently points at: unlink it from the
 * iterator's backing btree, free it back to the vm_area cache, and advance
 * the iterator to the next area that still intersects the
 * [it_search_base, it_search_limit] window.
 *
 * Returns KERN_OK when the iterator now points at a valid successor area,
 * or KERN_NO_ENTRY when there was no current area or no successor in range
 * (in which case the iterator is zeroed out).
 */
static kern_status_t area_iterator_erase(struct area_iterator *it)
{
	if (!it->it_root || !it->it_area) {
		return KERN_NO_ENTRY;
	}

	/* grab the successor before the current node is deleted and freed */
	struct btree_node *next = btree_next(&it->it_area->vma_node);

	btree_delete(it->it_list, &it->it_area->vma_node);
	vm_cache_free(&vm_area_cache, it->it_area);

	if (!next) {
		goto end;
	}
	struct vm_area *area = BTREE_CONTAINER(struct vm_area, vma_node, next);
	if (!area) {
		goto end;
	}
	if (area->vma_base > it->it_search_limit) {
		/* successor starts past the search window — iteration done */
		goto end;
	}

	it->it_area = area;
	it->it_base = area->vma_base;
	/* BUG FIX: the upper bound must come from vma_limit, not vma_base;
	 * with vma_base the clamped window collapsed to a single address
	 * and the clamp against it_search_limit below was meaningless. */
	it->it_limit = area->vma_limit;
	if (it->it_base < it->it_search_base) {
		it->it_base = it->it_search_base;
	}
	if (it->it_limit > it->it_search_limit) {
		it->it_limit = it->it_search_limit;
	}
	return KERN_OK;

end:
	memset(it, 0x0, sizeof *it);
	return KERN_NO_ENTRY;
}
/*** PUBLIC API ***************************************************************/
@@ -602,6 +652,35 @@ kern_status_t address_space_create(
return KERN_OK;
}
/* Remove the hardware translations covering `area` from its address
 * space's pmap, one page at a time. */
static void area_unmap(struct vm_area *area)
{
	pmap_t pmap = area->vma_space->s_pmap;
	virt_addr_t cursor = area->vma_base;

	while (cursor < area->vma_limit) {
		pmap_remove(pmap, cursor);
		cursor += VM_PAGE_SIZE;
	}
}
/*
 * Object-system destroy hook for an address space: release every mapping
 * and reservation still attached to the space before the object itself is
 * freed.
 *
 * `q` is the deferred-work queue supplied by the object layer; nothing is
 * queued from here, so it is deliberately unused.
 */
static kern_status_t address_space_cleanup(struct object *obj, struct queue *q)
{
	(void)q;

	struct address_space *space = ADDRESS_SPACE_CAST(obj);

	/* drain the active mappings: delete_area() detaches each area from
	 * its backing VM object and pmap, then the area itself is freed */
	struct btree_node *cur = btree_first(&space->s_mappings);
	while (cur) {
		struct btree_node *next = btree_next(cur);
		struct vm_area *area
			= BTREE_CONTAINER(struct vm_area, vma_node, cur);
		btree_delete(&space->s_mappings, cur);
		delete_area(area, space);
		vm_cache_free(&vm_area_cache, area);
		cur = next;
	}

	/* also drain the reserved ranges (see address_space_release, which
	 * iterates s_reserved); previously these vm_area entries were leaked
	 * on teardown. NOTE(review): reserved ranges appear to carry no
	 * backing object, so delete_area() is skipped here — confirm. */
	cur = btree_first(&space->s_reserved);
	while (cur) {
		struct btree_node *next = btree_next(cur);
		struct vm_area *area
			= BTREE_CONTAINER(struct vm_area, vma_node, cur);
		btree_delete(&space->s_reserved, cur);
		vm_cache_free(&vm_area_cache, area);
		cur = next;
	}

	return KERN_OK;
}
kern_status_t address_space_map(
struct address_space *root,
virt_addr_t map_address,
@@ -850,7 +929,7 @@ static kern_status_t delete_area(
&mapping->vma_object->vo_mappings,
&mapping->vma_object_entry);
mapping->vma_object = NULL;
vm_object_unlock_irqrestore(mapping->vma_object, flags);
vm_object_unlock_irqrestore(object, flags);
object_unref(&object->vo_base);
/* don't actually delete the mapping yet. that will be done by
@@ -882,7 +961,12 @@ kern_status_t address_space_unmap(
virt_addr_t unmap_limit = unmap_base + unmap_length - 1;
tracek("unmapping %zx-%zx", unmap_base, unmap_limit);
area_iterator_begin(&it, region, unmap_base, unmap_limit);
area_iterator_begin(
&it,
region,
&region->s_mappings,
unmap_base,
unmap_limit);
while (it.it_area) {
struct vm_area *area = it.it_area;
virt_addr_t area_base = area->vma_base;
@@ -1015,7 +1099,12 @@ kern_status_t address_space_release(
virt_addr_t release_limit = release_base + release_length - 1;
tracek("unreserving %zx-%zx", release_base, release_limit);
area_iterator_begin(&it, space, release_base, release_limit);
area_iterator_begin(
&it,
space,
&space->s_reserved,
release_base,
release_limit);
while (it.it_area) {
struct vm_area *area = it.it_area;
virt_addr_t area_base = area->vma_base;
@@ -1102,9 +1191,9 @@ bool address_space_validate_access(
limit -= 1;
}
/* TODO improve this to not require a per-page loop */
for (virt_addr_t i = base; i < limit;) {
struct vm_area *area = get_entry(region, i, GET_ENTRY_EXACT);
struct vm_area *area
= get_entry(&region->s_mappings, i, GET_ENTRY_EXACT);
if (!area) {
return false;
}
@@ -1176,7 +1265,8 @@ kern_status_t address_space_demand_map(
unsigned long irq_flags;
address_space_lock_irqsave(region, &irq_flags);
struct vm_area *area = get_entry(region, addr, GET_ENTRY_EXACT);
struct vm_area *area
= get_entry(&region->s_mappings, addr, GET_ENTRY_EXACT);
if (!area || !area->vma_object) {
address_space_unlock_irqrestore(region, irq_flags);
return KERN_NO_ENTRY;
@@ -1423,7 +1513,8 @@ kern_status_t address_space_translate(
return vm_virt_to_phys((const void *)in);
}
struct vm_area *area = get_entry(space, in, GET_ENTRY_EXACT);
struct vm_area *area
= get_entry(&space->s_mappings, in, GET_ENTRY_EXACT);
if (!area || !area->vma_object) {
return KERN_NO_ENTRY;
}