Compare commits

...

15 Commits

Author SHA1 Message Date
4551e7b2e6 sched: implement various ways to end tasks and threads 2026-03-18 21:07:43 +00:00
e03b2e07d0 vm: address-space: implement address space cleanup 2026-03-18 21:07:27 +00:00
24f9ef85bf sched: implement user-configurable fs and gs segment base addresses 2026-03-18 21:07:05 +00:00
63703a3d34 sched: don't reschedule a thread if its status is THREAD_STOPPED 2026-03-18 21:02:40 +00:00
d801203f04 syscall: vm-object: fix dangling reference to newly-created object 2026-03-18 21:02:19 +00:00
2a1a0cf14d kernel: finish implementation of private and shared futexes 2026-03-18 21:02:09 +00:00
b774415f64 sched: wait: implement wakeup_n, waitqueue_empty 2026-03-18 20:56:15 +00:00
04d05adbe8 kernel: handle: implement handle_table_destroy() 2026-03-18 20:55:35 +00:00
c0e212ac98 x86_64: panic: fix incorrect kernel stack traversal 2026-03-18 20:54:49 +00:00
88405233a8 vm: object: implement object cleanup 2026-03-18 20:53:56 +00:00
42a293e753 x86_64: pmap: implement pmap_destroy() 2026-03-18 20:53:24 +00:00
1eef23ea98 thread: store struct msg on the stack instead of in the thread 2026-03-18 20:52:47 +00:00
30c9c9db45 kernel: add futex definitions 2026-03-15 22:22:58 +00:00
c1e0b38952 vm: object: add missing include 2026-03-15 22:22:43 +00:00
8a38d940cc vm: address-space: add function to translate virtual addresses to physical 2026-03-15 22:22:25 +00:00
33 changed files with 1038 additions and 62 deletions

View File

@@ -28,4 +28,6 @@
static void __used common(void)
{
OFFSET(THREAD_sp, struct thread, tr_sp);
OFFSET(THREAD_fsbase, struct thread, tr_ml.tr_fsbase);
OFFSET(THREAD_gsbase, struct thread, tr_ml.tr_gsbase);
}

View File

@@ -3,6 +3,7 @@
#include <stdint.h>
#define MSR_FS_BASE 0xC0000100
#define MSR_GS_BASE 0xC0000101
#define MSR_KERNEL_GS_BASE 0xC0000102

View File

@@ -3,6 +3,10 @@
#include <kernel/sched.h>
struct ml_thread {
virt_addr_t tr_gsbase, tr_fsbase;
};
struct ml_cpu_context;
/* switch from one thread to another. the stack of the `to` thread must have
@@ -28,4 +32,15 @@ extern kern_status_t ml_thread_prepare_user_context(
const uintptr_t *args,
size_t nr_args);
extern kern_status_t ml_thread_config_get(
struct thread *thread,
kern_config_key_t key,
void *out,
size_t max);
extern kern_status_t ml_thread_config_set(
struct thread *thread,
kern_config_key_t key,
const void *ptr,
size_t len);
#endif

View File

@@ -175,7 +175,7 @@ static bool read_stack_frame(
struct stack_frame *out)
{
if (bp >= VM_PAGEMAP_BASE) {
*out = *(struct stack_frame *)out;
*out = *(struct stack_frame *)bp;
return true;
}

View File

@@ -100,7 +100,7 @@ static void delete_ptab(phys_addr_t pt)
return;
}
pt &= ~VM_PAGE_MASK;
pt = ENTRY_TO_PTR(pt);
if (!pt) {
/* physical address of 0x0, nothing to delete */
return;
@@ -117,7 +117,7 @@ static void delete_pdir(phys_addr_t pd)
return;
}
pd &= ~0x1FFFFFULL;
pd &= ENTRY_TO_PTR(pd);
if (!pd) {
/* physical address of 0x0, nothing to delete */
return;
@@ -412,8 +412,46 @@ pmap_t pmap_create(void)
return pmap;
}
/* Free a PDPT and every page directory / page table it references.
 * `pd` is the raw PML4 entry: a physical address plus flag bits. */
static void delete_pdpt(phys_addr_t pd)
{
	if (pd & PTE_PAGESIZE) {
		/* this entry points to a hugepage, nothing to delete */
		return;
	}
	/* BUG FIX: was `pd &= ENTRY_TO_PTR(pd)`, which ANDs the entry with
	 * its own masked value instead of extracting the physical address
	 * (compare delete_ptab, which assigns with `=`). */
	pd = ENTRY_TO_PTR(pd);
	if (!pd) {
		/* physical address of 0x0, nothing to delete */
		return;
	}
	struct pdpt *pdpt = vm_phys_to_virt(pd);
	for (int i = 0; i < 512; i++) {
		/* NOTE(review): the hugepage test reads p_pages[i] while the
		 * delete path reads p_entries[i] — confirm these alias the
		 * same storage (union); otherwise the test checks the wrong
		 * field. */
		if (pdpt->p_pages[i] & PTE_PAGESIZE) {
			/* this is a hugepage, there is nothing to delete */
			continue;
		}
		if (!pdpt->p_entries[i]) {
			continue;
		}
		delete_ptab(pdpt->p_entries[i]);
	}
	kfree(pdpt);
}
/* Tear down the paging structures of an address space. Only the lower
 * 256 PML4 slots are walked — the code frees just that half and leaves
 * the upper entries untouched. */
void pmap_destroy(pmap_t pmap)
{
	struct pml4t *top = vm_phys_to_virt(ENTRY_TO_PTR(pmap));
	unsigned int slot = 0;

	while (slot < 256) {
		phys_addr_t entry = top->p_entries[slot];
		if (entry) {
			delete_pdpt(entry);
		}
		slot++;
	}
	kfree(top);
}
static void log_fault(virt_addr_t fault_addr, enum pmap_fault_flags flags)

View File

@@ -1,5 +1,7 @@
#include <arch/msr.h>
#include <kernel/machine/cpu.h>
#include <kernel/machine/thread.h>
#include <kernel/thread.h>
#define MAX_REG_ARGS 6
#define REG_ARG_0 rdi
@@ -77,3 +79,52 @@ extern kern_status_t ml_thread_prepare_user_context(
return KERN_OK;
}
/* Machine-dependent per-thread config read; mirror of
 * ml_thread_config_set below.
 *
 * BUG FIX: the stub returned KERN_OK for every key without writing
 * anything to `out`, so callers received an uninitialized buffer.
 * Implement the read for the two keys the set path supports and reject
 * everything else. */
kern_status_t ml_thread_config_get(
	struct thread *thread,
	kern_config_key_t key,
	void *out,
	size_t max)
{
	switch (key) {
	case THREAD_CFG_FSBASE:
		if (max < sizeof(thread->tr_ml.tr_fsbase)) {
			return KERN_INVALID_ARGUMENT;
		}
		*(virt_addr_t *)out = thread->tr_ml.tr_fsbase;
		break;
	case THREAD_CFG_GSBASE:
		if (max < sizeof(thread->tr_ml.tr_gsbase)) {
			return KERN_INVALID_ARGUMENT;
		}
		*(virt_addr_t *)out = thread->tr_ml.tr_gsbase;
		break;
	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_OK;
}
/* Machine-dependent per-thread config write: stores the fs/gs segment
 * base in the thread's ml state. If the target is the running thread the
 * MSR is also updated immediately; otherwise the value takes effect at
 * the next context switch (ml_thread_switch loads both MSRs).
 *
 * NOTE(review): `*(virt_addr_t *)ptr` assumes `ptr` is suitably aligned,
 * and a non-canonical base written to an MSR will #GP in wrmsr — confirm
 * callers validate, or add a canonical-address check here. */
kern_status_t ml_thread_config_set(
	struct thread *thread,
	kern_config_key_t key,
	const void *ptr,
	size_t len)
{
	switch (key) {
	case THREAD_CFG_FSBASE:
		/* length must match exactly, not merely be large enough */
		if (len != sizeof(thread->tr_ml.tr_fsbase)) {
			return KERN_INVALID_ARGUMENT;
		}
		thread->tr_ml.tr_fsbase = *(virt_addr_t *)ptr;
		if (thread == current_thread()) {
			wrmsr(MSR_FS_BASE, thread->tr_ml.tr_fsbase);
		}
		break;
	case THREAD_CFG_GSBASE:
		if (len != sizeof(thread->tr_ml.tr_gsbase)) {
			return KERN_INVALID_ARGUMENT;
		}
		thread->tr_ml.tr_gsbase = *(virt_addr_t *)ptr;
		if (thread == current_thread()) {
			/* we're in the kernel right now, so the user and kernel
			 * gs-base registers are swapped. when we return to
			 * usermode, this value will be swapped back into
			 * the user gs-base register */
			wrmsr(MSR_KERNEL_GS_BASE, thread->tr_ml.tr_gsbase);
		}
		break;
	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_OK;
}

View File

@@ -13,6 +13,22 @@ ml_thread_switch:
push %rax
push %rcx
push %rdx
// set fs-base
mov $0xC0000100, %rcx
movq THREAD_fsbase(%rsi), %rax
movq THREAD_fsbase(%rsi), %rdx
shr $32, %rdx
wrmsr
// set (kernel) gs-base (it will be swapped back into user-gs-base at
// the end of this function)
mov $0xC0000102, %rcx
movq THREAD_gsbase(%rsi), %rax
movq THREAD_gsbase(%rsi), %rdx
shr $32, %rdx
wrmsr
push %rbx
pushq $0
push %rbp

View File

@@ -6,6 +6,7 @@
#include <kernel/vm.h>
#define ADDRESS_SPACE_COPY_ALL ((size_t)-1)
#define ADDRESS_SPACE_F_
struct address_space;
struct vm_object;
@@ -158,6 +159,12 @@ extern kern_status_t address_space_memmove_v(
size_t bytes_to_move,
size_t *nr_bytes_moved);
extern kern_status_t address_space_translate(
struct address_space *space,
virt_addr_t in,
phys_addr_t *out,
unsigned long *irq_flags);
void address_space_dump(struct address_space *region);
DEFINE_OBJECT_LOCK_FUNCTION(address_space, s_base)

34
include/kernel/futex.h Normal file
View File

@@ -0,0 +1,34 @@
#ifndef KERNEL_FUTEX_H_
#define KERNEL_FUTEX_H_
#include <kernel/btree.h>
#include <kernel/wait.h>
#include <mango/types.h>
struct task;
struct address_space;
typedef uintptr_t futex_key_t;
struct futex {
struct btree_node f_node;
futex_key_t f_key;
struct waitqueue f_waiters;
};
extern kern_status_t futex_init(void);
extern kern_status_t futex_get(
kern_futex_t *futex,
futex_key_t *out,
unsigned int flags);
extern kern_status_t futex_wait(
futex_key_t futex,
kern_futex_t new_val,
unsigned int flags);
extern kern_status_t futex_wake(
futex_key_t futex,
size_t nwaiters,
unsigned int flags);
#endif

View File

@@ -56,8 +56,29 @@ extern kern_status_t sys_task_create_thread(
extern kern_status_t sys_task_get_address_space(
kern_handle_t task,
kern_handle_t *out);
extern kern_status_t sys_task_config_get(
kern_handle_t task,
kern_config_key_t key,
void *ptr,
size_t len);
extern kern_status_t sys_task_config_set(
kern_handle_t task,
kern_config_key_t key,
const void *ptr,
size_t len);
extern kern_status_t sys_thread_start(kern_handle_t thread);
extern kern_status_t sys_thread_exit(void);
extern kern_status_t sys_thread_config_get(
kern_handle_t thread,
kern_config_key_t key,
void *ptr,
size_t len);
extern kern_status_t sys_thread_config_set(
kern_handle_t thread,
kern_config_key_t key,
const void *ptr,
size_t len);
extern kern_status_t sys_vm_object_create(
const char *name,
@@ -197,6 +218,15 @@ extern kern_status_t sys_vm_controller_supply_pages(
off_t src_offset,
size_t count);
extern kern_status_t sys_futex_wait(
kern_futex_t *futex,
kern_futex_t new_val,
unsigned int flags);
extern kern_status_t sys_futex_wake(
kern_futex_t *futex,
unsigned int nr_waiters,
unsigned int flags);
extern virt_addr_t syscall_get_function(unsigned int sysid);
#endif

View File

@@ -27,7 +27,7 @@ struct task {
struct address_space *t_address_space;
spin_lock_t t_handles_lock;
struct handle_table *t_handles;
struct btree b_channels;
struct btree t_channels, t_futex;
struct btree_node t_tasklist;
struct queue_entry t_child_entry;
@@ -48,6 +48,7 @@ static inline void task_unref(struct task *task)
{
object_unref(&task->t_base);
}
extern void task_exit(int status);
extern kern_status_t task_add_child(struct task *parent, struct task *child);
extern kern_status_t task_add_channel(
struct task *task,

View File

@@ -1,6 +1,7 @@
#ifndef KERNEL_THREAD_H_
#define KERNEL_THREAD_H_
#include <kernel/machine/thread.h>
#include <kernel/msg.h>
#include <kernel/object.h>
#include <kernel/vm-controller.h>
@@ -22,7 +23,7 @@ enum thread_flags {
};
struct thread {
struct object thr_base;
struct object tr_base;
enum thread_state tr_state;
enum thread_flags tr_flags;
@@ -38,15 +39,13 @@ struct thread {
virt_addr_t tr_ip, tr_sp, tr_bp;
virt_addr_t tr_cpu_user_sp, tr_cpu_kernel_sp;
struct ml_thread tr_ml;
struct runqueue *tr_rq;
struct msg tr_msg;
struct page_request tr_page_req;
struct queue_entry tr_parent_entry;
struct queue_entry tr_rqentry;
struct vm_page *tr_kstack;
struct vm_object *tr_ustack;
};
extern struct thread *thread_alloc(void);
@@ -60,8 +59,24 @@ extern kern_status_t thread_init_user(
size_t nr_args);
extern int thread_priority(struct thread *thr);
extern void thread_awaken(struct thread *thr);
extern void thread_exit(void);
extern void thread_join(struct thread *thread, unsigned long *irq_flags);
extern void thread_kill(struct thread *thread);
extern void idle(void);
extern struct thread *create_kernel_thread(void (*fn)(void));
extern struct thread *create_idle_thread(void);
extern kern_status_t thread_config_get(
struct thread *thread,
kern_config_key_t key,
void *out,
size_t max);
extern kern_status_t thread_config_set(
struct thread *thread,
kern_config_key_t key,
const void *ptr,
size_t len);
DEFINE_OBJECT_LOCK_FUNCTION(thread, tr_base)
#endif

View File

@@ -40,6 +40,15 @@ extern void thread_wait_end_nosleep(
struct waitqueue *q);
extern void wait_on_queue(struct waitqueue *q);
extern void wakeup_queue(struct waitqueue *q);
extern void wakeup_n(struct waitqueue *q, size_t n);
extern void wakeup_one(struct waitqueue *q);
/* Check, under the queue lock, whether any waiters are enqueued on `wq`. */
static inline bool waitqueue_empty(struct waitqueue *wq)
{
	unsigned long irqstate;
	bool empty;

	spin_lock_irqsave(&wq->wq_lock, &irqstate);
	empty = queue_empty(&wq->wq_waiters);
	spin_unlock_irqrestore(&wq->wq_lock, irqstate);
	return empty;
}
#endif

211
kernel/futex.c Normal file
View File

@@ -0,0 +1,211 @@
#include <kernel/address-space.h>
#include <kernel/futex.h>
#include <kernel/sched.h>
#include <kernel/task.h>
#include <mango/status.h>
#define FUTEX_CREATE 0x40u
static struct btree shared_futex_list = {0};
static spin_lock_t shared_futex_list_lock = SPIN_LOCK_INIT;
static struct vm_cache futex_cache = {
.c_name = "futex",
.c_obj_size = sizeof(struct futex),
};
BTREE_DEFINE_SIMPLE_INSERT(struct futex, f_node, f_key, put_futex)
BTREE_DEFINE_SIMPLE_GET(struct futex, uintptr_t, f_node, f_key, get_futex)
/* One-time initialization of the futex subsystem: sets up the cache that
 * struct futex instances are allocated from. */
kern_status_t futex_init(void)
{
	vm_cache_init(&futex_cache);
	return KERN_OK;
}
/* Look up — and, when FUTEX_CREATE is set, create — the struct futex for
 * `key`. FUTEX_PRIVATE keys live in the calling task's tree, FUTEX_SHARED
 * keys in the global tree. On success the list lock is returned *held* in
 * *out_lock with the saved IRQ state in *irq_flags; the caller releases it. */
static kern_status_t get_data(
	futex_key_t key,
	unsigned int flags,
	struct futex **out,
	spin_lock_t **out_lock,
	unsigned long *irq_flags)
{
	spin_lock_t *lock = NULL;
	struct btree *futex_list = NULL;
	if (flags & FUTEX_PRIVATE) {
		struct task *self = current_task();
		lock = &self->t_base.ob_lock;
		futex_list = &self->t_futex;
	} else if (flags & FUTEX_SHARED) {
		lock = &shared_futex_list_lock;
		futex_list = &shared_futex_list;
	} else {
		return KERN_INVALID_ARGUMENT;
	}
	spin_lock_irqsave(lock, irq_flags);
	struct futex *futex = get_futex(futex_list, key);
	if (futex) {
		/* BUG FIX: the old code fell through and allocated a second
		 * futex for a key that already had one, inserting a duplicate
		 * tree node and orphaning the existing waiters. Reuse the
		 * entry we found. */
		*out = futex;
		*out_lock = lock;
		return KERN_OK;
	}
	if (!(flags & FUTEX_CREATE)) {
		spin_unlock_irqrestore(lock, *irq_flags);
		return KERN_NO_ENTRY;
	}
	futex = vm_cache_alloc(&futex_cache, VM_NORMAL);
	if (!futex) {
		spin_unlock_irqrestore(lock, *irq_flags);
		return KERN_NO_MEMORY;
	}
	/* NOTE(review): f_waiters is used as a waitqueue with no explicit
	 * init call — confirm vm_cache_alloc returns zeroed objects and that
	 * a zeroed waitqueue is valid. */
	futex->f_key = key;
	put_futex(futex_list, futex);
	*out = futex;
	*out_lock = lock;
	return KERN_OK;
}
static kern_status_t cleanup_data(struct futex *futex, unsigned int flags)
{
struct btree *futex_list = NULL;
if (flags & FUTEX_PRIVATE) {
struct task *self = current_task();
futex_list = &self->t_futex;
} else if (flags & FUTEX_SHARED) {
futex_list = &shared_futex_list;
} else {
return KERN_INVALID_ARGUMENT;
}
btree_delete(futex_list, &futex->f_node);
vm_cache_free(&futex_cache, futex);
return KERN_OK;
}
/* Derive the key for a shared futex: the physical address backing the
 * user virtual address, so distinct tasks mapping the same page agree on
 * the key. */
static kern_status_t futex_get_shared(kern_futex_t *futex, futex_key_t *out)
{
	struct task *self = current_task();
	struct address_space *space = self->t_address_space;
	unsigned long flags;
	address_space_lock_irqsave(space, &flags);
	/* NOTE(review): `flags` is passed both as the saved IRQ state and as
	 * address_space_translate()'s irq_flags argument — confirm the callee
	 * does not clobber it before the unlock below restores it. */
	kern_status_t status = address_space_translate(
		space,
		(virt_addr_t)futex,
		out,
		&flags);
	address_space_unlock_irqrestore(space, flags);
	return status;
}
/* Private futexes are scoped to a single task, so the user virtual
 * address itself serves as the key. */
static kern_status_t futex_get_private(kern_futex_t *futex, futex_key_t *out)
{
	*out = (futex_key_t)futex;
	return KERN_OK;
}
/* Translate a user futex pointer into a lookup key according to the
 * sharing mode in `flags`. One of FUTEX_PRIVATE or FUTEX_SHARED must be
 * set; anything else is rejected. */
kern_status_t futex_get(
	kern_futex_t *futex,
	futex_key_t *out,
	unsigned int flags)
{
	if (flags & FUTEX_PRIVATE)
		return futex_get_private(futex, out);
	if (flags & FUTEX_SHARED)
		return futex_get_shared(futex, out);
	return KERN_INVALID_ARGUMENT;
}
/* Read the current value of the futex word identified by futex->f_key.
 * For private futexes the key is a user virtual address in the current
 * task; for shared futexes it is a physical address mapped back through
 * the kernel's phys-to-virt mapping. */
static kern_status_t futex_read(
	struct futex *futex,
	unsigned int flags,
	kern_futex_t *out)
{
	if (flags & FUTEX_PRIVATE) {
		/* NOTE(review): dereferences the user address directly from
		 * kernel context; assumes the caller validated the pointer and
		 * the page is resident — a fault here would happen with the
		 * futex list lock held. Confirm this is acceptable. */
		virt_addr_t addr = futex->f_key;
		*out = *(kern_futex_t *)addr;
		return KERN_OK;
	}
	if (flags & FUTEX_SHARED) {
		phys_addr_t paddr = futex->f_key;
		virt_addr_t vaddr = (virt_addr_t)vm_phys_to_virt(paddr);
		if (!vaddr) {
			return KERN_MEMORY_FAULT;
		}
		*out = *(kern_futex_t *)vaddr;
		return KERN_OK;
	}
	return KERN_INVALID_ARGUMENT;
}
/* Block the calling thread on the futex identified by `key`, provided the
 * futex word still holds `new_val` (the value the caller expects).
 * Returns KERN_BAD_STATE when the value already changed. */
kern_status_t futex_wait(
	futex_key_t key,
	kern_futex_t new_val,
	unsigned int flags)
{
	spin_lock_t *lock = NULL;
	unsigned long irq_flags = 0;
	struct futex *futex = NULL;
	/* BUG FIX: pass the kernel-internal FUTEX_CREATE flag so the first
	 * waiter on a key creates the kernel-side futex object. Without it
	 * get_data() returned KERN_NO_ENTRY and no thread could ever block. */
	kern_status_t status
		= get_data(key, flags | FUTEX_CREATE, &futex, &lock, &irq_flags);
	if (status != KERN_OK) {
		return status;
	}
	kern_futex_t current_val = 0;
	status = futex_read(futex, flags, &current_val);
	if (status != KERN_OK) {
		/* BUG FIX: a futex we just created (and which therefore has no
		 * waiters) must be torn down on the error path, or it leaks. */
		if (waitqueue_empty(&futex->f_waiters)) {
			cleanup_data(futex, flags);
		}
		spin_unlock_irqrestore(lock, irq_flags);
		return status;
	}
	if (current_val != new_val) {
		/* value already changed — don't sleep */
		if (waitqueue_empty(&futex->f_waiters)) {
			cleanup_data(futex, flags);
		}
		spin_unlock_irqrestore(lock, irq_flags);
		return KERN_BAD_STATE;
	}
	struct wait_item waiter;
	thread_wait_begin(&waiter, &futex->f_waiters);
	spin_unlock_irqrestore(lock, irq_flags);
	schedule(SCHED_NORMAL);
	spin_lock_irqsave(lock, &irq_flags);
	thread_wait_end(&waiter, &futex->f_waiters);
	/* the last waiter out tears the futex object down */
	if (waitqueue_empty(&futex->f_waiters)) {
		cleanup_data(futex, flags);
	}
	spin_unlock_irqrestore(lock, irq_flags);
	return KERN_OK;
}
/* Wake up to `nwaiters` threads blocked on `key` (all of them when
 * nwaiters == FUTEX_WAKE_ALL). Waking a key nobody has waited on returns
 * the lookup's KERN_NO_ENTRY. */
kern_status_t futex_wake(futex_key_t key, size_t nwaiters, unsigned int flags)
{
	spin_lock_t *list_lock = NULL;
	unsigned long saved = 0;
	struct futex *fx = NULL;

	kern_status_t status = get_data(key, flags, &fx, &list_lock, &saved);
	if (status != KERN_OK)
		return status;
	if (nwaiters == FUTEX_WAKE_ALL)
		wakeup_queue(&fx->f_waiters);
	else
		wakeup_n(&fx->f_waiters, nwaiters);
	spin_unlock_irqrestore(list_lock, saved);
	return KERN_OK;
}

View File

@@ -12,7 +12,7 @@
#define RESERVED_HANDLES 64
static struct vm_cache handle_table_cache = {
.c_name = "handle_table",
.c_name = "handle-table",
.c_obj_size = sizeof(struct handle_table),
};
@@ -33,8 +33,48 @@ struct handle_table *handle_table_create(void)
return out;
}
/* Drop every object handle held in a leaf-level handle table. */
static void do_handle_table_destroy_leaf(struct handle_table *tab)
{
	while (1) {
		unsigned int index = bitmap_lowest_set(
			tab->t_handles.t_handle_map,
			HANDLES_PER_TABLE);
		if (index == BITMAP_NPOS) {
			break;
		}
		struct handle *child = &tab->t_handles.t_handle_list[index];
		/* BUG FIX: the bit we just found must be cleared in the
		 * *handle* map. Clearing t_subtable_map left t_handle_map
		 * unchanged, so bitmap_lowest_set() returned the same index
		 * forever and this loop never terminated. */
		bitmap_clear(tab->t_handles.t_handle_map, index);
		if (child->h_object) {
			object_remove_handle(child->h_object);
			child->h_object = NULL;
		}
	}
}
/* Recursively destroy a handle (sub)table at `depth`: release all handles
 * in leaf tables and return every table level to the cache. */
static void do_handle_table_destroy(
	struct handle_table *tab,
	unsigned int depth)
{
	if (depth == MAX_TABLE_DEPTH - 1) {
		do_handle_table_destroy_leaf(tab);
		/* BUG FIX: leaf tables were never freed — the early return
		 * skipped the vm_cache_free below, leaking one handle_table
		 * allocation per leaf. */
		vm_cache_free(&handle_table_cache, tab);
		return;
	}
	for (size_t i = 0; i < REFS_PER_TABLE; i++) {
		struct handle_table *child
			= tab->t_subtables.t_subtable_list[i];
		if (child) {
			do_handle_table_destroy(child, depth + 1);
		}
	}
	vm_cache_free(&handle_table_cache, tab);
}
/* Public entry point: destroy an entire handle table tree, releasing each
 * handle's object reference and freeing all table levels. */
void handle_table_destroy(struct handle_table *tab)
{
	do_handle_table_destroy(tab, 0);
}
static kern_status_t decode_handle_indices(

View File

@@ -88,25 +88,25 @@ kern_status_t port_send_msg(
}
struct thread *self = current_thread();
struct msg *msg = &self->tr_msg;
memset(msg, 0x0, sizeof *msg);
msg->msg_status = KMSG_WAIT_RECEIVE;
msg->msg_sender_thread = self;
msg->msg_sender_port = port;
memcpy(&msg->msg_req, in_msg, sizeof msg->msg_req);
memcpy(&msg->msg_resp, out_reply, sizeof msg->msg_req);
struct msg msg;
memset(&msg, 0x0, sizeof msg);
msg.msg_status = KMSG_WAIT_RECEIVE;
msg.msg_sender_thread = self;
msg.msg_sender_port = port;
memcpy(&msg.msg_req, in_msg, sizeof msg.msg_req);
memcpy(&msg.msg_resp, out_reply, sizeof msg.msg_req);
unsigned long flags;
channel_lock_irqsave(port->p_remote, &flags);
port->p_status = PORT_SEND_BLOCKED;
channel_enqueue_msg(port->p_remote, msg);
channel_enqueue_msg(port->p_remote, &msg);
channel_unlock_irqrestore(port->p_remote, flags);
wait_for_reply(msg, lock_flags);
wait_for_reply(&msg, lock_flags);
channel_lock_irqsave(port->p_remote, &flags);
btree_delete(&port->p_remote->c_msg, &msg->msg_node);
btree_delete(&port->p_remote->c_msg, &msg.msg_node);
channel_unlock_irqrestore(port->p_remote, flags);
return msg->msg_result;
return msg.msg_result;
}

View File

@@ -60,8 +60,13 @@ SYSCALL_GATE task_self SYS_TASK_SELF 0
SYSCALL_GATE task_create SYS_TASK_CREATE 5
SYSCALL_GATE task_create_thread SYS_TASK_CREATE_THREAD 6
SYSCALL_GATE task_get_address_space SYS_TASK_GET_ADDRESS_SPACE 1
SYSCALL_GATE task_config_get SYS_TASK_CONFIG_GET 4
SYSCALL_GATE task_config_set SYS_TASK_CONFIG_SET 4
SYSCALL_GATE thread_start SYS_THREAD_START 1
SYSCALL_GATE thread_exit SYS_THREAD_EXIT 0
SYSCALL_GATE thread_config_get SYS_THREAD_CONFIG_GET 4
SYSCALL_GATE thread_config_set SYS_THREAD_CONFIG_SET 4
SYSCALL_GATE vm_object_create SYS_VM_OBJECT_CREATE 5
SYSCALL_GATE vm_object_read SYS_VM_OBJECT_READ 5
@@ -100,3 +105,6 @@ SYSCALL_GATE vm_controller_supply_pages SYS_VM_CONTROLLER_SUPPLY_PAGES 6
SYSCALL_GATE kern_object_wait SYS_KERN_OBJECT_WAIT 2
SYSCALL_GATE futex_wait SYS_FUTEX_WAIT 3
SYSCALL_GATE futex_wake SYS_FUTEX_WAKE 3

View File

@@ -0,0 +1,16 @@
#ifndef MANGO_FUTEX_H_
#define MANGO_FUTEX_H_
#include <mango/status.h>
#include <mango/types.h>
extern kern_status_t futex_wait(
kern_futex_t *futex,
kern_futex_t new_val,
unsigned int flags);
extern kern_status_t futex_wake(
kern_futex_t *futex,
unsigned int nr_waiters,
unsigned int flags);
#endif

View File

@@ -23,7 +23,28 @@ extern kern_status_t task_create_thread(
extern kern_status_t task_get_address_space(
kern_handle_t task,
kern_handle_t *out);
extern kern_status_t task_config_get(
kern_handle_t task,
kern_config_key_t key,
void *ptr,
size_t len);
extern kern_status_t task_config_set(
kern_handle_t task,
kern_config_key_t key,
const void *ptr,
size_t len);
extern kern_status_t thread_start(kern_handle_t thread);
extern kern_status_t thread_exit(void);
extern kern_status_t thread_config_get(
kern_handle_t thread,
kern_config_key_t key,
void *ptr,
size_t len);
extern kern_status_t thread_config_set(
kern_handle_t thread,
kern_config_key_t key,
const void *ptr,
size_t len);
#endif

View File

@@ -1,6 +1,8 @@
#ifndef MANGO_SIGNAL_H_
#define MANGO_SIGNAL_H_
#define THREAD_SIGNAL_STOPPED 0x01u
#define CHANNEL_SIGNAL_MSG_RECEIVED 0x01u
#define VM_CONTROLLER_SIGNAL_REQUEST_RECEIVED 0x01u

View File

@@ -13,7 +13,12 @@
#define SYS_TASK_CREATE 0x09u
#define SYS_TASK_CREATE_THREAD 0x0Au
#define SYS_TASK_GET_ADDRESS_SPACE 0x0Bu
#define SYS_TASK_CONFIG_GET 0x2Au
#define SYS_TASK_CONFIG_SET 0x2Bu
#define SYS_THREAD_START 0x0Cu
#define SYS_THREAD_EXIT 0x2Eu
#define SYS_THREAD_CONFIG_GET 0x2Cu
#define SYS_THREAD_CONFIG_SET 0x2Du
#define SYS_VM_OBJECT_CREATE 0x0Du
#define SYS_VM_OBJECT_READ 0x0Eu
#define SYS_VM_OBJECT_WRITE 0x0Fu
@@ -41,5 +46,7 @@
#define SYS_VM_CONTROLLER_CREATE_OBJECT 0x25u
#define SYS_VM_CONTROLLER_DETACH_OBJECT 0x26u
#define SYS_VM_CONTROLLER_SUPPLY_PAGES 0x27u
#define SYS_FUTEX_WAIT 0x28u
#define SYS_FUTEX_WAKE 0x29u
#endif

View File

@@ -16,8 +16,17 @@
#define MAP_ADDRESS_INVALID ((virt_addr_t)0)
#define KERN_HANDLE_INVALID ((kern_handle_t)0xFFFFFFFF)
#define KERN_CFG_INVALID 0x00u
#define KERN_CFG_PAGE_SIZE 0x01u
/* config keys for use with kern_config_get/kern_config_set */
#define KERN_CFG_INVALID 0x00000u
#define KERN_CFG_PAGE_SIZE 0x00001u
/* config keys for use with task_config_get/task_config_set */
#define TASK_CFG_INVALID 0x00000u
/* config keys for use with thread_config_get/thread_config_set */
#define THREAD_CFG_INVALID 0x00000u
#define THREAD_CFG_FSBASE 0x20001u
#define THREAD_CFG_GSBASE 0x20002u
/* maximum number of handles that can be sent in a single message */
#define KERN_MSG_MAX_HANDLES 64
@@ -44,6 +53,13 @@
#define PAGE_REQUEST_DIRTY 0x02u
#define PAGE_REQUEST_DETACH 0x03u
/* futex special values */
#define FUTEX_WAKE_ALL ((size_t)-1)
/* futex flags */
#define FUTEX_PRIVATE 0x01u
#define FUTEX_SHARED 0x02u
#define IOVEC(p, len) \
{ \
.io_base = (virt_addr_t)(p), \
@@ -75,6 +91,7 @@ typedef uint32_t kern_handle_t;
typedef uint32_t kern_config_key_t;
typedef uint32_t vm_prot_t;
typedef int64_t ssize_t;
typedef uint32_t kern_futex_t;
typedef unsigned short equeue_packet_type_t;

View File

@@ -123,8 +123,16 @@ void __schedule(enum sched_mode mode)
enum thread_state prev_state = READ_ONCE(prev->tr_state);
if ((mode == SCHED_IRQ || prev_state == THREAD_READY)
&& prev != rq->rq_idle) {
bool reschedule = false;
if (prev_state == THREAD_READY || mode == SCHED_IRQ) {
reschedule = true;
}
if (prev == rq->rq_idle || prev_state == THREAD_STOPPED) {
reschedule = false;
}
if (reschedule) {
rq_enqueue(rq, prev);
}

View File

@@ -222,13 +222,13 @@ kern_status_t task_add_channel(
{
channel->c_id = id;
if (!task->b_channels.b_root) {
task->b_channels.b_root = &channel->c_node;
btree_insert_fixup(&task->b_channels, &channel->c_node);
if (!task->t_channels.b_root) {
task->t_channels.b_root = &channel->c_node;
btree_insert_fixup(&task->t_channels, &channel->c_node);
return KERN_OK;
}
struct btree_node *cur = task->b_channels.b_root;
struct btree_node *cur = task->t_channels.b_root;
while (1) {
struct channel *cur_node
= BTREE_CONTAINER(struct channel, c_node, cur);
@@ -255,7 +255,7 @@ kern_status_t task_add_channel(
cur = next;
}
btree_insert_fixup(&task->b_channels, &channel->c_node);
btree_insert_fixup(&task->t_channels, &channel->c_node);
return KERN_OK;
}
@@ -268,7 +268,7 @@ BTREE_DEFINE_SIMPLE_GET(
struct channel *task_get_channel(struct task *task, unsigned int id)
{
return get_channel_with_id(&task->b_channels, id);
return get_channel_with_id(&task->t_channels, id);
}
struct task *task_from_tid(tid_t id)
@@ -280,6 +280,72 @@ struct task *task_from_tid(tid_t id)
return t;
}
/* Terminate the calling task: unlink from the parent, stop and reap every
 * other thread, drop the address space, tear down the page tables and the
 * handle table, then yield forever (the current thread is THREAD_STOPPED
 * and will not be rescheduled).
 *
 * NOTE(review): `status` is currently unused — exit codes are not
 * delivered anywhere. Confirm whether that is intentional. */
void task_exit(int status)
{
	struct task *self = current_task();
	unsigned long flags;
	task_lock_irqsave(self, &flags);
	struct task *parent = self->t_parent;
	if (parent) {
		/* re-acquire in parent->child order to unlink from the
		 * parent's child list.
		 * NOTE(review): `flags` saved for `self` above is overwritten
		 * by the parent lock here yet restored at the very end against
		 * the handles lock — verify this IRQ-state bookkeeping. */
		task_unlock_irqrestore(self, flags);
		task_lock_irqsave(parent, &flags);
		task_lock(self);
		queue_delete(&parent->t_children, &self->t_child_entry);
		task_unlock(parent);
	}
	struct thread *cur_thread = current_thread();
	self->t_state = TASK_STOPPED;
	cur_thread->tr_state = THREAD_STOPPED;
	/* kill every thread except the one running this exit path */
	struct queue_entry *cur = queue_first(&self->t_threads);
	while (cur) {
		struct queue_entry *next = queue_next(cur);
		struct thread *thread
			= QUEUE_CONTAINER(struct thread, tr_parent_entry, cur);
		if (thread == cur_thread) {
			cur = next;
			continue;
		}
		thread_lock(thread);
		thread_kill(thread);
		queue_delete(&self->t_threads, cur);
		thread_unlock(thread);
		cur = next;
	}
	object_unref(&self->t_address_space->s_base);
	spin_lock_t *handles_lock = &self->t_handles_lock;
	struct handle_table *handles = self->t_handles;
	spin_lock(&self->t_handles_lock);
	/* switch to the kernel's page tables before destroying this task's */
	pmap_switch(get_kernel_pmap());
	pmap_destroy(self->t_pmap);
	task_unlock(self);
	handle_table_destroy(handles);
	/* NOTE(review): two printks follow; the first ("thread ... killed")
	 * looks like leftover debugging duplicated by the one below —
	 * consider removing one. */
	printk("thread %s[%u.%u] killed",
		self->t_name,
		self->t_id,
		cur_thread->tr_id);
	printk("task %s[%u] killed (%u, %u)",
		self->t_name,
		self->t_id,
		self->t_base.ob_refcount,
		self->t_base.ob_handles);
	spin_unlock_irqrestore(handles_lock, flags);
	/* never returns */
	while (1) {
		schedule(SCHED_NORMAL);
	}
}
kern_status_t task_open_handle(
struct task *task,
struct object *obj,

View File

@@ -2,15 +2,17 @@
#include <kernel/cpu.h>
#include <kernel/machine/thread.h>
#include <kernel/object.h>
#include <kernel/printk.h>
#include <kernel/task.h>
#include <kernel/thread.h>
#include <mango/signal.h>
#define THREAD_CAST(p) OBJECT_C_CAST(struct thread, thr_base, &thread_type, p)
#define THREAD_CAST(p) OBJECT_C_CAST(struct thread, tr_base, &thread_type, p)
static struct object_type thread_type = {
.ob_name = "thread",
.ob_size = sizeof(struct thread),
.ob_header_offset = offsetof(struct thread, thr_base),
.ob_header_offset = offsetof(struct thread, tr_base),
};
kern_status_t thread_object_type_init(void)
@@ -63,9 +65,6 @@ kern_status_t thread_init_user(
const uintptr_t *args,
size_t nr_args)
{
thr->tr_id = thr->tr_parent->t_next_thread_id++;
thr->tr_prio = PRIO_NORMAL;
thr->tr_state = THREAD_READY;
thr->tr_quantum_target = default_quantum();
@@ -142,6 +141,55 @@ void thread_awaken(struct thread *thr)
rq_unlock(rq, flags);
}
/* Voluntarily terminate the calling thread: mark it stopped, raise the
 * stop signal for joiners, then yield forever. Does not return. */
void thread_exit(void)
{
	unsigned long irqstate;
	struct thread *me = current_thread();

	thread_lock_irqsave(me, &irqstate);
	me->tr_state = THREAD_STOPPED;
	object_assert_signal(&me->tr_base, THREAD_SIGNAL_STOPPED);
	printk("thread %s[%u.%u] exited",
		me->tr_parent->t_name,
		me->tr_parent->t_id,
		me->tr_id);
	thread_unlock_irqrestore(me, irqstate);
	for (;;)
		schedule(SCHED_NORMAL);
}
/* Block until `thread` has stopped. Sleeps on the thread's stop signal
 * and re-checks the state after each wakeup. */
void thread_join(struct thread *thread, unsigned long *irq_flags)
{
	while (thread->tr_state != THREAD_STOPPED) {
		object_wait_signal(
			&thread->tr_base,
			THREAD_SIGNAL_STOPPED,
			irq_flags);
	}
}
/* Forcibly stop `thread`: mark it stopped, pull it off its runqueue if it
 * is queued, and raise the stop signal so joiners wake.
 * NOTE(review): the caller in task_exit holds the thread lock around this
 * call — confirm that is a documented requirement for all callers. */
void thread_kill(struct thread *thread)
{
	thread->tr_state = THREAD_STOPPED;
	if (thread->tr_rq) {
		unsigned long flags;
		rq_lock(thread->tr_rq, &flags);
		rq_remove_thread(thread->tr_rq, thread);
		rq_unlock(thread->tr_rq, flags);
	}
	object_assert_signal(&thread->tr_base, THREAD_SIGNAL_STOPPED);
	printk("thread %s[%u.%u] killed",
		thread->tr_parent->t_name,
		thread->tr_parent->t_id,
		thread->tr_id);
}
struct thread *create_kernel_thread(void (*fn)(void))
{
struct task *kernel = kernel_task();
@@ -185,3 +233,31 @@ struct thread *create_idle_thread(void)
return thr;
}
/* Read a per-thread configuration value. No generic keys are handled at
 * this layer yet; everything is delegated to the machine-dependent
 * implementation. */
kern_status_t thread_config_get(
	struct thread *thread,
	kern_config_key_t key,
	void *out,
	size_t max)
{
	return ml_thread_config_get(thread, key, out, max);
}
/* Set a per-thread configuration value. No generic keys are handled at
 * this layer yet; everything is delegated to the machine-dependent
 * implementation. */
kern_status_t thread_config_set(
	struct thread *thread,
	kern_config_key_t key,
	const void *ptr,
	size_t len)
{
	return ml_thread_config_set(thread, key, ptr, len);
}

View File

@@ -69,6 +69,24 @@ void wakeup_queue(struct waitqueue *q)
spin_unlock_irqrestore(&q->wq_lock, flags);
}
/* Wake at most `nr_waiters` threads from the front of `q`. Does nothing
 * when the queue is empty or nr_waiters is zero. */
void wakeup_n(struct waitqueue *q, size_t nr_waiters)
{
	unsigned long flags;
	spin_lock_irqsave(&q->wq_lock, &flags);
	/* BUG FIX: the old loop popped an entry *before* checking the
	 * remaining budget, so when nr_waiters ran out the extra popped
	 * waiter had been removed from the queue without being awakened
	 * (lost wakeup). Only pop while we still intend to wake. */
	while (nr_waiters > 0) {
		struct queue_entry *ent = queue_pop_front(&q->wq_waiters);
		if (!ent) {
			break;
		}
		struct wait_item *waiter
			= QUEUE_CONTAINER(struct wait_item, w_entry, ent);
		thread_awaken(waiter->w_thread);
		nr_waiters--;
	}
	spin_unlock_irqrestore(&q->wq_lock, flags);
}
void wakeup_one(struct waitqueue *q)
{
unsigned long flags;

View File

@@ -11,6 +11,9 @@ static const virt_addr_t syscall_table[] = {
SYSCALL_TABLE_ENTRY(TASK_CREATE_THREAD, task_create_thread),
SYSCALL_TABLE_ENTRY(TASK_GET_ADDRESS_SPACE, task_get_address_space),
SYSCALL_TABLE_ENTRY(THREAD_START, thread_start),
SYSCALL_TABLE_ENTRY(THREAD_EXIT, thread_exit),
SYSCALL_TABLE_ENTRY(THREAD_CONFIG_GET, thread_config_get),
SYSCALL_TABLE_ENTRY(THREAD_CONFIG_SET, thread_config_set),
SYSCALL_TABLE_ENTRY(VM_OBJECT_CREATE, vm_object_create),
SYSCALL_TABLE_ENTRY(VM_OBJECT_READ, vm_object_read),
SYSCALL_TABLE_ENTRY(VM_OBJECT_WRITE, vm_object_write),
@@ -48,6 +51,8 @@ static const virt_addr_t syscall_table[] = {
VM_CONTROLLER_SUPPLY_PAGES,
vm_controller_supply_pages),
SYSCALL_TABLE_ENTRY(KERN_OBJECT_WAIT, kern_object_wait),
SYSCALL_TABLE_ENTRY(FUTEX_WAIT, futex_wait),
SYSCALL_TABLE_ENTRY(FUTEX_WAKE, futex_wake),
};
static const size_t syscall_table_count
= sizeof syscall_table / sizeof syscall_table[0];

37
syscall/futex.c Normal file
View File

@@ -0,0 +1,37 @@
#include <kernel/futex.h>
#include <kernel/sched.h>
#include <kernel/syscall.h>
#include <kernel/task.h>
/* Syscall entry for futex wait: validate the user pointer, derive the
 * lookup key, then block in futex_wait(). */
kern_status_t sys_futex_wait(
	kern_futex_t *futex,
	kern_futex_t new_val,
	unsigned int flags)
{
	if (!validate_access_r(current_task(), futex, sizeof *futex))
		return KERN_MEMORY_FAULT;

	futex_key_t key;
	kern_status_t rc = futex_get(futex, &key, flags);
	if (rc != KERN_OK)
		return rc;
	return futex_wait(key, new_val, flags);
}
/* Syscall entry for futex wake: derive the lookup key and wake up to
 * `nr_waiters` blocked threads.
 * FIX: validate the user address exactly like sys_futex_wait() does, so a
 * bogus pointer fails with KERN_MEMORY_FAULT before key derivation. */
kern_status_t sys_futex_wake(
	kern_futex_t *futex,
	unsigned int nr_waiters,
	unsigned int flags)
{
	struct task *self = current_task();
	if (!validate_access_r(self, futex, sizeof *futex)) {
		return KERN_MEMORY_FAULT;
	}
	futex_key_t key;
	kern_status_t status = futex_get(futex, &key, flags);
	if (status != KERN_OK) {
		return status;
	}
	return futex_wake(key, nr_waiters, flags);
}

View File

@@ -1,10 +1,12 @@
#include <kernel/printk.h>
#include <kernel/sched.h>
#include <kernel/task.h>
#include <kernel/thread.h>
/* sys_kern_log: write a user-supplied string to the kernel log, tagged
 * "task-name[task-id.thread-id]".
 * FIX: the flattened diff left both the pre- and post-refactor printk
 * calls in place, logging the message twice; keep only the variant that
 * includes the thread id.
 * NOTE(review): `s` comes from userspace and is printed without explicit
 * validation/copy-in — confirm printk's %s path tolerates bad pointers. */
kern_status_t sys_kern_log(const char *s)
{
	struct task *task = current_task();
	struct thread *thread = current_thread();
	printk("%s[%d.%d]: %s", task->t_name, task->t_id, thread->tr_id, s);
	return KERN_OK;
}

View File

@@ -10,11 +10,8 @@ extern kern_status_t sys_task_exit(int status)
{
struct task *self = current_task();
printk("%s[%d]: task_exit(%d)", self->t_name, self->t_id, status);
while (1) {
milli_sleep(5000);
}
return KERN_UNIMPLEMENTED;
task_exit(status);
return KERN_FATAL_ERROR;
}
kern_status_t sys_task_self(kern_handle_t *out)
@@ -179,6 +176,7 @@ kern_status_t sys_task_create_thread(
&target_handle,
&out_handle);
if (status != KERN_OK) {
object_unref(target_obj);
task_unlock_irqrestore(self, flags);
return status;
}
@@ -198,10 +196,11 @@ kern_status_t sys_task_create_thread(
}
thread_init_user(thread, ip, sp, args, nr_args);
target_handle->h_object = &thread->thr_base;
object_add_handle(&thread->thr_base);
target_handle->h_object = &thread->tr_base;
object_add_handle(&thread->tr_base);
task_unlock_irqrestore(target, flags);
object_unref(target_obj);
*out_thread = out_handle;
return KERN_OK;
@@ -287,3 +286,82 @@ kern_status_t sys_thread_start(kern_handle_t thread_handle)
return KERN_OK;
}
/* sys_thread_exit: terminate the calling thread. thread_exit() never
 * returns; the return below exists only to satisfy the signature. */
kern_status_t sys_thread_exit(void)
{
	thread_exit();
	/* unreachable */
	return KERN_FATAL_ERROR;
}
/* Syscall: read a configuration value from the thread identified by
 * `thread_handle` into a caller-supplied buffer. */
kern_status_t sys_thread_config_get(
	kern_handle_t thread_handle,
	kern_config_key_t key,
	void *ptr,
	size_t len)
{
	struct task *self = current_task();
	unsigned long irqstate;

	/* the result is written into `ptr`, so it must be writable */
	if (!validate_access_w(self, ptr, len)) {
		return KERN_MEMORY_FAULT;
	}

	struct object *obj;
	handle_flags_t hflags;
	task_lock_irqsave(self, &irqstate);
	kern_status_t rc = task_resolve_handle(
		self,
		thread_handle,
		&obj,
		&hflags);
	if (rc != KERN_OK) {
		task_unlock_irqrestore(self, irqstate);
		return rc;
	}
	struct thread *target = thread_cast(obj);
	task_unlock_irqrestore(self, irqstate);

	rc = thread_config_get(target, key, ptr, len);
	object_unref(obj);
	return rc;
}
/* Syscall: set a configuration value on the thread identified by
 * `thread_handle`, reading the new value from a caller-supplied buffer. */
kern_status_t sys_thread_config_set(
	kern_handle_t thread_handle,
	kern_config_key_t key,
	const void *ptr,
	size_t len)
{
	unsigned long flags;
	struct task *self = current_task();
	/* BUG FIX: the kernel only *reads* from `ptr` here (it holds the new
	 * value), so it must be checked for read access; validate_access_w
	 * wrongly rejected values passed in read-only user mappings. */
	if (!validate_access_r(self, ptr, len)) {
		return KERN_MEMORY_FAULT;
	}
	struct object *thread_obj;
	handle_flags_t thread_flags;
	task_lock_irqsave(self, &flags);
	kern_status_t status = task_resolve_handle(
		self,
		thread_handle,
		&thread_obj,
		&thread_flags);
	if (status != KERN_OK) {
		task_unlock_irqrestore(self, flags);
		return status;
	}
	struct thread *thread = thread_cast(thread_obj);
	task_unlock_irqrestore(self, flags);
	status = thread_config_set(thread, key, ptr, len);
	object_unref(thread_obj);
	return status;
}

View File

@@ -29,12 +29,9 @@ kern_status_t sys_vm_object_create(
kern_status_t status
= task_open_handle(self, &obj->vo_base, 0, out_handle);
if (status != KERN_OK) {
object_unref(&obj->vo_base);
return status;
}
object_unref(&obj->vo_base);
return KERN_OK;
return status;
}
kern_status_t sys_vm_object_read(

View File

@@ -34,6 +34,7 @@ struct vm_iterator {
/* iterates over the areas in an address space */
struct area_iterator {
struct address_space *it_root;
struct btree *it_list;
struct vm_area *it_area;
virt_addr_t it_search_base, it_search_limit;
virt_addr_t it_base, it_limit;
@@ -44,12 +45,15 @@ enum search_direction {
SEARCH_RIGHT,
};
static kern_status_t address_space_object_destroy(struct object *obj);
static kern_status_t address_space_cleanup(struct object *obj, struct queue *q);
static struct object_type address_space_type = {
.ob_name = "address-space",
.ob_size = sizeof(struct address_space),
.ob_header_offset = offsetof(struct address_space, s_base),
.ob_ops = {
.destroy = address_space_cleanup,
},
};
static struct vm_cache vm_area_cache = {
@@ -59,6 +63,10 @@ static struct vm_cache vm_area_cache = {
/*** INTERNAL UTILITY FUNCTION ************************************************/
static kern_status_t delete_area(
struct vm_area *mapping,
struct address_space *root);
/* this function must be called with `parent` locked */
static void put_entry(struct btree *tree, struct vm_area *child)
{
@@ -105,7 +113,7 @@ static void put_entry(struct btree *tree, struct vm_area *child)
}
static struct vm_area *get_entry(
struct address_space *region,
struct btree *list,
virt_addr_t address,
enum get_entry_flags flags)
{
@@ -114,7 +122,7 @@ static struct vm_area *get_entry(
/* `x` must be to the right of `y` */
#define RIGHT_DIFF(x, y) ((y) ? ((y)->vma_limit - (x)) : ((size_t)-1))
struct btree_node *cur = region->s_mappings.b_root;
struct btree_node *cur = list->b_root;
if (!cur) {
return NULL;
}
@@ -345,7 +353,7 @@ static void vm_iterator_begin(
it->it_region = region;
it->it_prot = prot;
it->it_mapping = get_entry(region, base, GET_ENTRY_EXACT);
it->it_mapping = get_entry(&region->s_mappings, base, GET_ENTRY_EXACT);
if (!it->it_mapping) {
return;
}
@@ -409,8 +417,10 @@ static kern_status_t vm_iterator_seek(struct vm_iterator *it, size_t nr_bytes)
it->it_base += nr_bytes;
struct vm_area *next_mapping
= get_entry(it->it_region, it->it_base, GET_ENTRY_EXACT);
struct vm_area *next_mapping = get_entry(
&it->it_region->s_mappings,
it->it_base,
GET_ENTRY_EXACT);
if (!next_mapping) {
it->it_buf = NULL;
it->it_max = 0;
@@ -478,12 +488,14 @@ static void vm_iterator_finish(struct vm_iterator *it)
static void area_iterator_begin(
struct area_iterator *it,
struct address_space *space,
struct btree *area_list,
virt_addr_t base,
virt_addr_t limit)
{
memset(it, 0x0, sizeof *it);
struct vm_area *area = get_entry(space, base, GET_ENTRY_CLOSEST_RIGHT);
struct vm_area *area
= get_entry(area_list, base, GET_ENTRY_CLOSEST_RIGHT);
if (!area) {
return;
}
@@ -552,8 +564,46 @@ end:
return KERN_NO_ENTRY;
}
static void area_iterator_erase(struct area_iterator *it)
/*
 * area_iterator_erase - delete the area the iterator currently points
 * at and advance to the next area intersecting the search window.
 *
 * The current vm_area is unlinked from it_list and freed.  On success
 * the iterator is repositioned on the successor (with it_base/it_limit
 * clamped to the search window) and KERN_OK is returned.  If there is
 * no current area, no successor, or the successor lies past the search
 * window, the iterator is cleared and KERN_NO_ENTRY is returned.
 */
static kern_status_t area_iterator_erase(struct area_iterator *it)
{
	if (!it->it_root || !it->it_area) {
		return KERN_NO_ENTRY;
	}
	/* fetch the successor before the current node is freed */
	struct btree_node *next = btree_next(&it->it_area->vma_node);
	btree_delete(it->it_list, &it->it_area->vma_node);
	vm_cache_free(&vm_area_cache, it->it_area);
	if (!next) {
		goto end;
	}
	/* BTREE_CONTAINER on a non-NULL node is never NULL, so the old
	 * `if (!area)` check was dead and has been dropped */
	struct vm_area *area = BTREE_CONTAINER(struct vm_area, vma_node, next);
	if (area->vma_base > it->it_search_limit) {
		/* successor starts past the search window */
		goto end;
	}
	it->it_area = area;
	it->it_base = area->vma_base;
	/* BUGFIX: the upper bound must come from the area's limit, not its
	 * base; using vma_base made the visible range empty and defeated
	 * the clamp against it_search_limit below */
	it->it_limit = area->vma_limit;
	/* clamp the visible range to the search window */
	if (it->it_base < it->it_search_base) {
		it->it_base = it->it_search_base;
	}
	if (it->it_limit > it->it_search_limit) {
		it->it_limit = it->it_search_limit;
	}
	return KERN_OK;
end:
	memset(it, 0x0, sizeof *it);
	return KERN_NO_ENTRY;
}
/*** PUBLIC API ***************************************************************/
@@ -602,6 +652,35 @@ kern_status_t address_space_create(
return KERN_OK;
}
/*
 * area_unmap - drop every page translation covering the area from its
 * address space's pmap, one page at a time.
 */
static void area_unmap(struct vm_area *area)
{
	pmap_t pmap = area->vma_space->s_pmap;
	virt_addr_t va = area->vma_base;
	while (va < area->vma_limit) {
		pmap_remove(pmap, va);
		va += VM_PAGE_SIZE;
	}
}
/*
 * address_space_cleanup - object destroy hook for address spaces.
 *
 * Walks the s_mappings tree, unlinking each vm_area, letting
 * delete_area() drop its backing-object linkage, and freeing the area
 * structure.  `q` is unused here (part of the destroy-hook signature).
 *
 * NOTE(review): entries in s_reserved and the pmap itself are not
 * released here — confirm they are freed elsewhere (pmap_destroy() was
 * added in a sibling commit), otherwise they leak on teardown.
 */
static kern_status_t address_space_cleanup(struct object *obj, struct queue *q)
{
	struct address_space *space = ADDRESS_SPACE_CAST(obj);
	struct btree_node *cur = btree_first(&space->s_mappings);
	while (cur) {
		/* grab the successor before unlinking the current node */
		struct btree_node *next = btree_next(cur);
		struct vm_area *area
			= BTREE_CONTAINER(struct vm_area, vma_node, cur);
		btree_delete(&space->s_mappings, cur);
		delete_area(area, space);
		vm_cache_free(&vm_area_cache, area);
		cur = next;
	}
	return KERN_OK;
}
kern_status_t address_space_map(
struct address_space *root,
virt_addr_t map_address,
@@ -850,7 +929,7 @@ static kern_status_t delete_area(
&mapping->vma_object->vo_mappings,
&mapping->vma_object_entry);
mapping->vma_object = NULL;
vm_object_unlock_irqrestore(mapping->vma_object, flags);
vm_object_unlock_irqrestore(object, flags);
object_unref(&object->vo_base);
/* don't actually delete the mapping yet. that will be done by
@@ -882,7 +961,12 @@ kern_status_t address_space_unmap(
virt_addr_t unmap_limit = unmap_base + unmap_length - 1;
tracek("unmapping %zx-%zx", unmap_base, unmap_limit);
area_iterator_begin(&it, region, unmap_base, unmap_limit);
area_iterator_begin(
&it,
region,
&region->s_mappings,
unmap_base,
unmap_limit);
while (it.it_area) {
struct vm_area *area = it.it_area;
virt_addr_t area_base = area->vma_base;
@@ -1015,7 +1099,12 @@ kern_status_t address_space_release(
virt_addr_t release_limit = release_base + release_length - 1;
tracek("unreserving %zx-%zx", release_base, release_limit);
area_iterator_begin(&it, space, release_base, release_limit);
area_iterator_begin(
&it,
space,
&space->s_reserved,
release_base,
release_limit);
while (it.it_area) {
struct vm_area *area = it.it_area;
virt_addr_t area_base = area->vma_base;
@@ -1102,9 +1191,9 @@ bool address_space_validate_access(
limit -= 1;
}
/* TODO improve this to not require a per-page loop */
for (virt_addr_t i = base; i < limit;) {
struct vm_area *area = get_entry(region, i, GET_ENTRY_EXACT);
struct vm_area *area
= get_entry(&region->s_mappings, i, GET_ENTRY_EXACT);
if (!area) {
return false;
}
@@ -1176,7 +1265,8 @@ kern_status_t address_space_demand_map(
unsigned long irq_flags;
address_space_lock_irqsave(region, &irq_flags);
struct vm_area *area = get_entry(region, addr, GET_ENTRY_EXACT);
struct vm_area *area
= get_entry(&region->s_mappings, addr, GET_ENTRY_EXACT);
if (!area || !area->vma_object) {
address_space_unlock_irqrestore(region, irq_flags);
return KERN_NO_ENTRY;
@@ -1413,6 +1503,43 @@ extern kern_status_t address_space_memmove_v(
return KERN_OK;
}
/*
 * address_space_translate - translate a virtual address in `space` to a
 * physical address.
 *
 * Must be entered with `space` locked.  For user addresses the space
 * lock is temporarily dropped while the backing page is looked up (the
 * vm-object lock covers that window) and re-acquired before returning.
 *
 * On success the physical address, including the offset within the
 * page, is stored in *out and KERN_OK is returned.  KERN_NO_ENTRY is
 * returned when no mapping or backing page exists.
 */
kern_status_t address_space_translate(
	struct address_space *space,
	virt_addr_t in,
	phys_addr_t *out,
	unsigned long *irq_flags)
{
	if (in >= VM_KERNEL_BASE) {
		/* NOTE(review): assumes vm_virt_to_phys() yields the
		 * physical address of a kernel virtual address — confirm;
		 * the previous code returned its result as a status */
		*out = vm_virt_to_phys((const void *)in);
		return KERN_OK;
	}
	struct vm_area *area
		= get_entry(&space->s_mappings, in, GET_ENTRY_EXACT);
	if (!area || !area->vma_object) {
		return KERN_NO_ENTRY;
	}
	off_t offset = in - area->vma_base + area->vma_object_offset;
	struct vm_object *vmo = area->vma_object;
	vm_object_lock(vmo);
	address_space_unlock(space);
	struct vm_page *pg = vm_object_get_page(
		vmo,
		offset,
		VMO_ALLOCATE_MISSING_PAGE | VMO_REQUEST_MISSING_PAGE,
		irq_flags);
	if (!pg) {
		/* BUGFIX: restore the lock state on failure; the old code
		 * returned while still holding the vm-object lock and with
		 * `space` unlocked */
		vm_object_unlock(vmo);
		address_space_lock(space);
		return KERN_NO_ENTRY;
	}
	phys_addr_t paddr = vm_page_get_paddr(pg);
	paddr += (in & VM_PAGE_MASK);
	vm_object_unlock(vmo);
	address_space_lock(space);
	/* BUGFIX: report the result through *out instead of returning the
	 * physical address as a kern_status_t; *out was never written */
	*out = paddr;
	return KERN_OK;
}
#ifdef TRACE
void address_space_dump(struct address_space *region)
{

View File

@@ -1,4 +1,5 @@
#include <kernel/address-space.h>
#include <kernel/printk.h>
#include <kernel/sched.h>
#include <kernel/util.h>
#include <kernel/vm-controller.h>
@@ -14,10 +15,30 @@
(p) += VM_PAGE_SIZE; \
}
static kern_status_t vm_object_cleanup(struct object *obj, struct queue *q)
{
struct vm_object *vmo = vm_object_cast(obj);
struct btree_node *cur = btree_first(&vmo->vo_pages);
while (cur) {
struct vm_page *pg
= BTREE_CONTAINER(struct vm_page, p_bnode, cur);
struct btree_node *next = btree_next(cur);
btree_delete(&vmo->vo_pages, cur);
vm_page_free(pg);
cur = next;
}
return KERN_OK;
}
/* type descriptor for vm-objects; the destroy hook frees all pages */
static struct object_type vm_object_type = {
	.ob_name = "vm-object",
	.ob_size = sizeof(struct vm_object),
	.ob_header_offset = offsetof(struct vm_object, vo_base),
	.ob_ops = {
		.destroy = vm_object_cleanup,
	},
};
struct object_iterator {