Compare commits

...

12 Commits

28 changed files with 578 additions and 327 deletions

View File

@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.13) cmake_minimum_required(VERSION 4.0)
project(mango C ASM) project(mango C ASM)
if (NOT BUILD_TOOLS_DIR) if (NOT BUILD_TOOLS_DIR)
@@ -28,8 +28,6 @@ file(GLOB_RECURSE arch_sources_c arch/${kernel_arch}/*.c)
file(GLOB_RECURSE arch_sources_asm arch/${kernel_arch}/*.S) file(GLOB_RECURSE arch_sources_asm arch/${kernel_arch}/*.S)
file(GLOB_RECURSE arch_headers arch/${kernel_arch}/*.h) file(GLOB_RECURSE arch_headers arch/${kernel_arch}/*.h)
set_property(SOURCE ${arch_sources_asm} PROPERTY LANGUAGE C)
add_executable(${kernel_exe_name} add_executable(${kernel_exe_name}
${kernel_sources} ${kernel_sources}
${kernel_headers} ${kernel_headers}

View File

@@ -1,4 +1,5 @@
target_compile_options(${kernel_exe_name} PRIVATE target_compile_options(${kernel_exe_name} PRIVATE
-z max-page-size=0x1000 -m64 -mcmodel=large -mno-red-zone -mno-mmx -z max-page-size=0x1000 -m64 -mcmodel=large -mno-red-zone -mno-mmx
-mno-sse -mno-sse2 -D_64BIT -DBYTE_ORDER=1234) -mno-sse -mno-sse2 -D_64BIT -DBYTE_ORDER=1234)
target_link_libraries(${kernel_exe_name} "-z max-page-size=0x1000" "-T ${CMAKE_CURRENT_SOURCE_DIR}/arch/x86_64/layout.ld") target_link_libraries(${kernel_exe_name} "-static -z max-page-size=0x1000" "-T ${CMAKE_CURRENT_SOURCE_DIR}/arch/x86_64/layout.ld")

66
include/kernel/atomic.h Normal file
View File

@@ -0,0 +1,66 @@
#ifndef KERNEL_ATOMIC_H_
#define KERNEL_ATOMIC_H_

#include <stdbool.h>
#include <stdint.h>

/* Signed 64-bit atomic counter type, manipulated only through the
 * wrappers below (GCC/Clang __atomic builtins). Used e.g. for object
 * reference counts (`ob_refcount`). */
typedef int64_t atomic_t;

/* load and return the value pointed to by `v` (acquire: reads/writes
 * after this load cannot be reordered before it) */
static inline atomic_t atomic_load(atomic_t *v)
{
        return __atomic_load_n(v, __ATOMIC_ACQUIRE);
}

/* store the value `v` to the pointer `dest`.
 * release semantics: writes made before this store are visible to any
 * thread that acquire-loads the new value. NOTE: __ATOMIC_ACQUIRE is
 * not a valid memory order for a plain store (GCC warns and silently
 * promotes it to SEQ_CST), hence __ATOMIC_RELEASE here. */
static inline void atomic_store(atomic_t *dest, atomic_t v)
{
        __atomic_store_n(dest, v, __ATOMIC_RELEASE);
}

/* store the value `v` to the pointer `dest`, and return the value
 * previously stored at `dest`. ACQ_REL: acts as both an acquire load
 * and a release store. */
static inline atomic_t atomic_exchange(atomic_t *dest, atomic_t v)
{
        return __atomic_exchange_n(dest, v, __ATOMIC_ACQ_REL);
}

/* compare the contents of `ptr` to the contents of `expected`.
 * if they match, store the value `desired` to the pointer `ptr` and
 * return true. if they do NOT match, store the value `*ptr` to the
 * pointer `expected` and return false.
 * success order is ACQ_REL; the failure order must be no stronger than
 * the success order and may not contain a release, so it stays ACQUIRE. */
static inline bool atomic_compare_exchange(
        atomic_t *ptr,
        atomic_t *expected,
        atomic_t desired)
{
        return __atomic_compare_exchange_n(
                ptr,
                expected,
                desired,
                false,          /* strong variant: no spurious failure */
                __ATOMIC_ACQ_REL,
                __ATOMIC_ACQUIRE);
}

/* perform the operation *ptr += val, and return the result.
 * ACQ_REL so that a refcount increment publishes prior writes and a
 * subsequent decrement-to-zero observes them. */
static inline atomic_t atomic_add_fetch(atomic_t *ptr, atomic_t val)
{
        return __atomic_add_fetch(ptr, val, __ATOMIC_ACQ_REL);
}

/* perform the operation *ptr -= val, and return the result.
 * ACQ_REL is required for refcount-style use: the thread that drops
 * the last reference must observe all writes made by threads that
 * released earlier references before tearing the object down. */
static inline atomic_t atomic_sub_fetch(atomic_t *ptr, atomic_t val)
{
        return __atomic_sub_fetch(ptr, val, __ATOMIC_ACQ_REL);
}

/* perform the operation *ptr += val, and return the previous value of *ptr */
static inline atomic_t atomic_fetch_add(atomic_t *ptr, atomic_t val)
{
        return __atomic_fetch_add(ptr, val, __ATOMIC_ACQ_REL);
}

/* perform the operation *ptr -= val, and return the previous value of *ptr */
static inline atomic_t atomic_fetch_sub(atomic_t *ptr, atomic_t val)
{
        return __atomic_fetch_sub(ptr, val, __ATOMIC_ACQ_REL);
}

#endif

View File

@@ -13,6 +13,7 @@ enum kmsg_status {
KMSG_WAIT_RECEIVE, KMSG_WAIT_RECEIVE,
KMSG_WAIT_REPLY, KMSG_WAIT_REPLY,
KMSG_REPLY_SENT, KMSG_REPLY_SENT,
KMSG_ASYNC,
}; };
struct msg { struct msg {
@@ -20,10 +21,26 @@ struct msg {
enum kmsg_status msg_status; enum kmsg_status msg_status;
struct btree_node msg_node; struct btree_node msg_node;
msgid_t msg_id; msgid_t msg_id;
kern_status_t msg_result;
struct port *msg_sender_port; struct port *msg_sender_port;
struct thread *msg_sender_thread; struct thread *msg_sender_thread;
kern_status_t msg_result;
kern_msg_type_t msg_type;
union {
/* msg_type = KERN_MSG_TYPE_DATA */
struct {
kern_msg_t msg_req, msg_resp; kern_msg_t msg_req, msg_resp;
}; };
/* msg_type = KERN_MSG_TYPE_EVENT */
struct {
kern_msg_event_type_t msg_event;
};
};
};
extern void msg_init(void);
extern struct msg *msg_alloc(void);
extern void msg_free(struct msg *msg);
#endif #endif

View File

@@ -1,6 +1,7 @@
#ifndef KERNEL_OBJECT_H_ #ifndef KERNEL_OBJECT_H_
#define KERNEL_OBJECT_H_ #define KERNEL_OBJECT_H_
#include <kernel/atomic.h>
#include <kernel/flags.h> #include <kernel/flags.h>
#include <kernel/locks.h> #include <kernel/locks.h>
#include <kernel/vm.h> #include <kernel/vm.h>
@@ -79,10 +80,7 @@ enum object_type_flags {
}; };
struct object_ops { struct object_ops {
kern_status_t (*destroy)(struct object *obj, struct queue *q); kern_status_t (*destroy)(struct object *obj);
kern_status_t (*destroy_recurse)(
struct queue_entry *entry,
struct object **out);
}; };
struct object_type { struct object_type {
@@ -101,8 +99,7 @@ struct object {
struct object_type *ob_type; struct object_type *ob_type;
spin_lock_t ob_lock; spin_lock_t ob_lock;
uint32_t ob_signals; uint32_t ob_signals;
unsigned int ob_refcount; atomic_t ob_refcount;
unsigned int ob_handles;
struct queue_entry ob_list; struct queue_entry ob_list;
struct waitqueue ob_wq; struct waitqueue ob_wq;
} __aligned(sizeof(long)); } __aligned(sizeof(long));
@@ -114,8 +111,6 @@ extern kern_status_t object_type_unregister(struct object_type *p);
extern struct object *object_create(struct object_type *type); extern struct object *object_create(struct object_type *type);
extern struct object *object_ref(struct object *obj); extern struct object *object_ref(struct object *obj);
extern void object_unref(struct object *obj); extern void object_unref(struct object *obj);
extern void object_add_handle(struct object *obj);
extern void object_remove_handle(struct object *obj);
extern void object_lock(struct object *obj); extern void object_lock(struct object *obj);
extern void object_unlock(struct object *obj); extern void object_unlock(struct object *obj);
extern void object_lock_irqsave(struct object *obj, unsigned long *flags); extern void object_lock_irqsave(struct object *obj, unsigned long *flags);

View File

@@ -67,6 +67,7 @@ extern kern_status_t sys_task_config_set(
const void *ptr, const void *ptr,
size_t len); size_t len);
extern kern_status_t sys_thread_self(kern_handle_t *out);
extern kern_status_t sys_thread_start(kern_handle_t thread); extern kern_status_t sys_thread_start(kern_handle_t thread);
extern kern_status_t sys_thread_exit(void); extern kern_status_t sys_thread_exit(void);
extern kern_status_t sys_thread_config_get( extern kern_status_t sys_thread_config_get(

View File

@@ -13,13 +13,11 @@ enum page_request_status {
PAGE_REQUEST_PENDING = 0, PAGE_REQUEST_PENDING = 0,
PAGE_REQUEST_IN_PROGRESS, PAGE_REQUEST_IN_PROGRESS,
PAGE_REQUEST_COMPLETE, PAGE_REQUEST_COMPLETE,
PAGE_REQUEST_ASYNC,
}; };
struct vm_controller { struct vm_controller {
struct object vc_base; struct object vc_base;
/* tree of struct vm_objects bound to this controller, keyed with the
* equeue_key_t specified when the object(s) were created. */
struct btree vc_objects;
/* tree of pending page requests */ /* tree of pending page requests */
struct btree vc_requests; struct btree vc_requests;
/* the equeue to send async page requests to */ /* the equeue to send async page requests to */
@@ -36,7 +34,7 @@ struct page_request {
enum page_request_status req_status; enum page_request_status req_status;
kern_status_t req_result; kern_status_t req_result;
spin_lock_t req_lock; spin_lock_t req_lock;
struct vm_object *req_object; equeue_key_t req_object;
struct thread *req_sender; struct thread *req_sender;
struct btree_node req_node; struct btree_node req_node;
off_t req_offset; off_t req_offset;

View File

@@ -14,19 +14,25 @@ enum vm_object_flags {
VMO_IN_PLACE = 0x01u, VMO_IN_PLACE = 0x01u,
/* this vm-object is/was attached to a vm-controller */ /* this vm-object is/was attached to a vm-controller */
VMO_CONTROLLER = 0x02u, VMO_CONTROLLER = 0x02u,
/* when a vm-object is attached to a controller, and the ref-count of
* the object falls to one (i.e. the last reference is the handle to
* the object held by the server that created it), the object will
* be detached, allowing the server to close the last handle to the
* object and dispose of it. */
VMO_AUTO_DETACH = 0x04u,
/* these flags are for use with vm_object_get_page */ /* these flags are for use with vm_object_get_page */
/**************************************************/ /**************************************************/
/* if the relevant page hasn't been allocated yet, it will be allocated /* if the relevant page hasn't been allocated yet, it will be allocated
* and returned. if this flag isn't specified, NULL will be returned. */ * and returned. if this flag isn't specified, NULL will be returned. */
VMO_ALLOCATE_MISSING_PAGE = 0x04u, VMO_ALLOCATE_MISSING_PAGE = 0x08u,
/* if the vm-object is attached to a vm-controller, and the relevant /* if the vm-object is attached to a vm-controller, and the relevant
* page is uncommitted, send a request to the vm-controller to provide * page is uncommitted, send a request to the vm-controller to provide
* the missing page. will result in the vm-object being unlocked and * the missing page. will result in the vm-object being unlocked and
* the current thread sleeping until the request is fulfilled. the * the current thread sleeping until the request is fulfilled. the
* vm-object will be re-locked before the function returns. */ * vm-object will be re-locked before the function returns. */
VMO_REQUEST_MISSING_PAGE = 0x08u, VMO_REQUEST_MISSING_PAGE = 0x10u,
}; };
struct vm_object { struct vm_object {

View File

@@ -78,6 +78,7 @@ void kernel_init(uintptr_t arg)
port_type_init(); port_type_init();
channel_type_init(); channel_type_init();
msg_init();
struct boot_module bsp_image = {0}; struct boot_module bsp_image = {0};
bsp_get_location(&bsp_image); bsp_get_location(&bsp_image);

View File

@@ -98,11 +98,18 @@ static struct msg *get_next_msg(
while (cur) { while (cur) {
struct msg *msg = BTREE_CONTAINER(struct msg, msg_node, cur); struct msg *msg = BTREE_CONTAINER(struct msg, msg_node, cur);
spin_lock_irqsave(&msg->msg_lock, lock_flags); spin_lock_irqsave(&msg->msg_lock, lock_flags);
if (msg->msg_status == KMSG_WAIT_RECEIVE) { switch (msg->msg_status) {
case KMSG_WAIT_RECEIVE:
msg->msg_status = KMSG_WAIT_REPLY; msg->msg_status = KMSG_WAIT_REPLY;
msg->msg_sender_port->p_status = PORT_REPLY_BLOCKED; msg->msg_sender_port->p_status = PORT_REPLY_BLOCKED;
channel->c_msg_waiting--; channel->c_msg_waiting--;
return msg; return msg;
case KMSG_ASYNC:
btree_delete(&channel->c_msg, &msg->msg_node);
channel->c_msg_waiting--;
return msg;
default:
break;
} }
spin_unlock_irqrestore(&msg->msg_lock, *lock_flags); spin_unlock_irqrestore(&msg->msg_lock, *lock_flags);
@@ -146,24 +153,22 @@ extern kern_status_t channel_recv_msg(
&channel->c_base, &channel->c_base,
CHANNEL_SIGNAL_MSG_RECEIVED); CHANNEL_SIGNAL_MSG_RECEIVED);
} }
#if 0
wait_item_init(&waiter, self);
for (;;) {
thread_wait_begin(&waiter, &channel->c_wq);
msg = get_next_msg(channel, &msg_lock_flags);
if (msg) {
break;
}
object_unlock_irqrestore(&channel->c_base, *irq_flags);
schedule(SCHED_NORMAL);
object_lock_irqsave(&channel->c_base, irq_flags);
}
thread_wait_end(&waiter, &channel->c_wq);
#endif
/* msg is now set to the next message to process */ /* msg is now set to the next message to process */
if (msg->msg_type != KERN_MSG_TYPE_DATA) {
/* event messages are asynchronous */
out_msg->msg_id = msg->msg_id;
out_msg->msg_type = msg->msg_type;
out_msg->msg_event = msg->msg_event;
out_msg->msg_sender = msg->msg_sender_thread->tr_parent->t_id;
out_msg->msg_endpoint = msg->msg_sender_port->p_base.ob_id;
spin_unlock_irqrestore(&msg->msg_lock, msg_lock_flags);
msg_free(msg);
return KERN_OK;
}
struct task *sender = msg->msg_sender_thread->tr_parent; struct task *sender = msg->msg_sender_thread->tr_parent;
struct task *receiver = self->tr_parent; struct task *receiver = self->tr_parent;
@@ -218,6 +223,7 @@ extern kern_status_t channel_recv_msg(
} }
out_msg->msg_id = msg->msg_id; out_msg->msg_id = msg->msg_id;
out_msg->msg_type = msg->msg_type;
out_msg->msg_sender = msg->msg_sender_thread->tr_parent->t_id; out_msg->msg_sender = msg->msg_sender_thread->tr_parent->t_id;
out_msg->msg_endpoint = msg->msg_sender_port->p_base.ob_id; out_msg->msg_endpoint = msg->msg_sender_port->p_base.ob_id;

View File

@@ -46,7 +46,7 @@ static void do_handle_table_destroy_leaf(struct handle_table *tab)
struct handle *child = &tab->t_handles.t_handle_list[index]; struct handle *child = &tab->t_handles.t_handle_list[index];
bitmap_clear(tab->t_subtables.t_subtable_map, index); bitmap_clear(tab->t_subtables.t_subtable_map, index);
if (child->h_object) { if (child->h_object) {
object_remove_handle(child->h_object); object_unref(child->h_object);
child->h_object = NULL; child->h_object = NULL;
} }
} }
@@ -195,7 +195,7 @@ kern_status_t handle_table_free_handle(
= &tab->t_handles.t_handle_list[handle_index]; = &tab->t_handles.t_handle_list[handle_index];
if (handle_entry->h_object) { if (handle_entry->h_object) {
object_remove_handle(handle_entry->h_object); object_unref(handle_entry->h_object);
} }
memset(handle_entry, 0x0, sizeof *handle_entry); memset(handle_entry, 0x0, sizeof *handle_entry);
@@ -307,7 +307,7 @@ kern_status_t handle_table_transfer(
dst_entry->h_object = src_entry->h_object; dst_entry->h_object = src_entry->h_object;
dst_entry->h_flags = src_entry->h_flags; dst_entry->h_flags = src_entry->h_flags;
object_add_handle(dst_entry->h_object); object_ref(dst_entry->h_object);
handle_table_free_handle(src, src_handles[i].hnd_value); handle_table_free_handle(src, src_handles[i].hnd_value);
@@ -326,7 +326,7 @@ kern_status_t handle_table_transfer(
dst_entry->h_object = src_entry->h_object; dst_entry->h_object = src_entry->h_object;
dst_entry->h_flags = src_entry->h_flags; dst_entry->h_flags = src_entry->h_flags;
object_add_handle(dst_entry->h_object); object_ref(dst_entry->h_object);
dst_handle.hnd_mode = src_handles[i].hnd_mode; dst_handle.hnd_mode = src_handles[i].hnd_mode;
dst_handle.hnd_value = dst_value; dst_handle.hnd_value = dst_value;
@@ -371,7 +371,7 @@ kern_status_t handle_table_transfer(
struct handle *src_entry struct handle *src_entry
= handle_table_get_handle(src, handle.hnd_value); = handle_table_get_handle(src, handle.hnd_value);
if (src_entry) { if (src_entry) {
object_remove_handle(src_entry->h_object); object_unref(src_entry->h_object);
handle_table_free_handle(src, handle.hnd_value); handle_table_free_handle(src, handle.hnd_value);
} }
} }

22
kernel/msg.c Normal file
View File

@@ -0,0 +1,22 @@
#include <kernel/msg.h>
#include <kernel/vm.h>
/* backing cache for `struct msg` allocations: one slot per in-flight
 * kernel message (data messages and async connection events). */
static struct vm_cache msg_cache = {
        .c_name = "msg",
        .c_obj_size = sizeof(struct msg),
};

/* register the message cache with the VM subsystem. called once during
 * boot (kernel_init), before any port/channel traffic can allocate a
 * message. */
void msg_init(void)
{
        vm_cache_init(&msg_cache);
}

/* allocate one zero-cost message slot from the cache.
 * NOTE(review): presumably returns NULL on exhaustion — callers such as
 * port_connect check for NULL; confirm vm_cache_alloc's contract. */
struct msg *msg_alloc(void)
{
        return vm_cache_alloc(&msg_cache, VM_NORMAL);
}

/* return `msg` to the cache. `msg` must have come from msg_alloc(). */
void msg_free(struct msg *msg)
{
        vm_cache_free(&msg_cache, msg);
}

View File

@@ -74,90 +74,28 @@ struct object *object_create(struct object_type *type)
obj->ob_lock = SPIN_LOCK_INIT; obj->ob_lock = SPIN_LOCK_INIT;
obj->ob_magic = OBJECT_MAGIC; obj->ob_magic = OBJECT_MAGIC;
obj->ob_refcount = 1; obj->ob_refcount = 1;
obj->ob_handles = 0;
return obj; return obj;
} }
struct object *object_ref(struct object *obj) struct object *object_ref(struct object *obj)
{ {
obj->ob_refcount++; atomic_add_fetch(&obj->ob_refcount, 1);
return obj; return obj;
} }
static void __cleanup(struct object *obj, struct queue *queue)
{
if (HAS_OP(obj, destroy)) {
obj->ob_type->ob_ops.destroy(obj, queue);
}
vm_cache_free(&obj->ob_type->ob_cache, obj);
}
static void object_cleanup(struct object *obj, unsigned long flags)
{
if (obj->ob_refcount > 0 || obj->ob_handles > 0) {
spin_unlock_irqrestore(&obj->ob_lock, flags);
return;
}
struct queue queue = QUEUE_INIT;
__cleanup(obj, &queue);
if (!HAS_OP(obj, destroy_recurse)) {
return;
}
while (!queue_empty(&queue)) {
struct queue_entry *entry = queue_pop_front(&queue);
struct object *child = NULL;
obj->ob_type->ob_ops.destroy_recurse(entry, &child);
if (!child) {
continue;
}
if (child->ob_refcount > 1) {
child->ob_refcount--;
continue;
}
if (child->ob_refcount == 0 && child->ob_handles == 0) {
__cleanup(child, &queue);
}
}
}
void object_unref(struct object *obj) void object_unref(struct object *obj)
{ {
unsigned long flags; int ref = atomic_sub_fetch(&obj->ob_refcount, 1);
spin_lock_irqsave(&obj->ob_lock, &flags); if (ref > 0) {
if (obj->ob_refcount == 0) {
spin_unlock_irqrestore(&obj->ob_lock, flags);
return; return;
} }
obj->ob_refcount--; if (HAS_OP(obj, destroy)) {
object_cleanup(obj, flags); obj->ob_type->ob_ops.destroy(obj);
} }
void object_add_handle(struct object *obj) vm_cache_free(&obj->ob_type->ob_cache, obj);
{
obj->ob_handles++;
}
void object_remove_handle(struct object *obj)
{
unsigned long flags;
spin_lock_irqsave(&obj->ob_lock, &flags);
if (obj->ob_handles == 0) {
spin_unlock_irqrestore(&obj->ob_lock, flags);
return;
}
obj->ob_handles--;
object_cleanup(obj, flags);
} }
void object_lock(struct object *obj) void object_lock(struct object *obj)

View File

@@ -1,16 +1,29 @@
#include <kernel/channel.h> #include <kernel/channel.h>
#include <kernel/port.h> #include <kernel/port.h>
#include <kernel/printk.h>
#include <kernel/thread.h> #include <kernel/thread.h>
#include <kernel/util.h> #include <kernel/util.h>
#define PORT_CAST(p) OBJECT_C_CAST(struct port, p_base, &port_type, p) #define PORT_CAST(p) OBJECT_C_CAST(struct port, p_base, &port_type, p)
static kern_status_t port_cleanup(struct object *obj);
static struct object_type port_type = { static struct object_type port_type = {
.ob_name = "port", .ob_name = "port",
.ob_size = sizeof(struct port), .ob_size = sizeof(struct port),
.ob_header_offset = offsetof(struct port, p_base), .ob_header_offset = offsetof(struct port, p_base),
.ob_ops = {
.destroy = port_cleanup,
},
}; };
static kern_status_t port_cleanup(struct object *obj)
{
struct port *port = PORT_CAST(obj);
port_disconnect(port);
return KERN_OK;
}
kern_status_t port_type_init(void) kern_status_t port_type_init(void)
{ {
return object_type_register(&port_type); return object_type_register(&port_type);
@@ -58,9 +71,26 @@ struct port *port_create(void)
kern_status_t port_connect(struct port *port, struct channel *remote) kern_status_t port_connect(struct port *port, struct channel *remote)
{ {
if (port->p_status != PORT_OFFLINE) { if (port->p_status != PORT_OFFLINE) {
tracek("port_connect: port in bad state (%d)", port->p_status);
return KERN_BAD_STATE; return KERN_BAD_STATE;
} }
struct msg *msg = msg_alloc();
if (!msg) {
return KERN_NO_MEMORY;
}
msg->msg_status = KMSG_ASYNC;
msg->msg_type = KERN_MSG_TYPE_EVENT;
msg->msg_event = KERN_MSG_EVENT_CONNECTION;
msg->msg_sender_thread = current_thread();
msg->msg_sender_port = port;
unsigned long flags;
channel_lock_irqsave(remote, &flags);
channel_enqueue_msg(remote, msg);
channel_unlock_irqrestore(remote, flags);
port->p_remote = remote; port->p_remote = remote;
port->p_status = PORT_READY; port->p_status = PORT_READY;
return KERN_OK; return KERN_OK;
@@ -69,9 +99,27 @@ kern_status_t port_connect(struct port *port, struct channel *remote)
kern_status_t port_disconnect(struct port *port) kern_status_t port_disconnect(struct port *port)
{ {
if (port->p_status != PORT_READY) { if (port->p_status != PORT_READY) {
tracek("port_disconnect: port in bad state (%d)",
port->p_status);
return KERN_BAD_STATE; return KERN_BAD_STATE;
} }
struct msg *msg = msg_alloc();
if (!msg) {
return KERN_NO_MEMORY;
}
msg->msg_status = KMSG_ASYNC;
msg->msg_type = KERN_MSG_TYPE_EVENT;
msg->msg_event = KERN_MSG_EVENT_DISCONNECTION;
msg->msg_sender_thread = current_thread();
msg->msg_sender_port = port;
unsigned long flags;
channel_lock_irqsave(port->p_remote, &flags);
channel_enqueue_msg(port->p_remote, msg);
channel_unlock_irqrestore(port->p_remote, flags);
port->p_remote = NULL; port->p_remote = NULL;
port->p_status = PORT_OFFLINE; port->p_status = PORT_OFFLINE;
return KERN_OK; return KERN_OK;
@@ -84,12 +132,14 @@ kern_status_t port_send_msg(
unsigned long *lock_flags) unsigned long *lock_flags)
{ {
if (port->p_status != PORT_READY) { if (port->p_status != PORT_READY) {
tracek("port_send_msg: port in bad state (%d)", port->p_status);
return KERN_BAD_STATE; return KERN_BAD_STATE;
} }
struct thread *self = current_thread(); struct thread *self = current_thread();
struct msg msg; struct msg msg;
memset(&msg, 0x0, sizeof msg); memset(&msg, 0x0, sizeof msg);
msg.msg_type = KERN_MSG_TYPE_DATA;
msg.msg_status = KMSG_WAIT_RECEIVE; msg.msg_status = KMSG_WAIT_RECEIVE;
msg.msg_sender_thread = self; msg.msg_sender_thread = self;
msg.msg_sender_port = port; msg.msg_sender_port = port;

View File

@@ -1,7 +1,6 @@
file(GLOB headers ${CMAKE_CURRENT_SOURCE_DIR}/include/mango/*.h) file(GLOB headers ${CMAKE_CURRENT_SOURCE_DIR}/include/mango/*.h)
file(GLOB asm_sources file(GLOB asm_sources
${CMAKE_CURRENT_SOURCE_DIR}/arch/${CMAKE_SYSTEM_PROCESSOR}/*.S) ${CMAKE_CURRENT_SOURCE_DIR}/arch/${CMAKE_SYSTEM_PROCESSOR}/*.S)
set_property(SOURCE ${asm_sources} PROPERTY LANGUAGE C)
set(public_include_dirs set(public_include_dirs
${CMAKE_CURRENT_SOURCE_DIR}/include ${CMAKE_CURRENT_SOURCE_DIR}/include

View File

@@ -56,13 +56,14 @@
.endm .endm
SYSCALL_GATE task_exit SYS_TASK_EXIT 1 SYSCALL_GATE task_exit SYS_TASK_EXIT 1
SYSCALL_GATE task_self SYS_TASK_SELF 0 SYSCALL_GATE task_self SYS_TASK_SELF 1
SYSCALL_GATE task_create SYS_TASK_CREATE 5 SYSCALL_GATE task_create SYS_TASK_CREATE 5
SYSCALL_GATE task_create_thread SYS_TASK_CREATE_THREAD 6 SYSCALL_GATE task_create_thread SYS_TASK_CREATE_THREAD 6
SYSCALL_GATE task_get_address_space SYS_TASK_GET_ADDRESS_SPACE 1 SYSCALL_GATE task_get_address_space SYS_TASK_GET_ADDRESS_SPACE 1
SYSCALL_GATE task_config_get SYS_TASK_CONFIG_GET 4 SYSCALL_GATE task_config_get SYS_TASK_CONFIG_GET 4
SYSCALL_GATE task_config_set SYS_TASK_CONFIG_SET 4 SYSCALL_GATE task_config_set SYS_TASK_CONFIG_SET 4
SYSCALL_GATE thread_self SYS_THREAD_SELF 1
SYSCALL_GATE thread_start SYS_THREAD_START 1 SYSCALL_GATE thread_start SYS_THREAD_START 1
SYSCALL_GATE thread_exit SYS_THREAD_EXIT 0 SYSCALL_GATE thread_exit SYS_THREAD_EXIT 0
SYSCALL_GATE thread_config_get SYS_THREAD_CONFIG_GET 4 SYSCALL_GATE thread_config_get SYS_THREAD_CONFIG_GET 4

View File

@@ -34,6 +34,7 @@ extern kern_status_t task_config_set(
const void *ptr, const void *ptr,
size_t len); size_t len);
extern kern_status_t thread_self(kern_handle_t *out);
extern kern_status_t thread_start(kern_handle_t thread); extern kern_status_t thread_start(kern_handle_t thread);
extern kern_status_t thread_exit(void); extern kern_status_t thread_exit(void);
extern kern_status_t thread_config_get( extern kern_status_t thread_config_get(

View File

@@ -1,52 +1,53 @@
#ifndef MANGO_SYSCALL_H_ #ifndef MANGO_SYSCALL_H_
#define MANGO_SYSCALL_H_ #define MANGO_SYSCALL_H_
#define SYS_KERN_LOG 0x00u #define SYS_KERN_LOG 1
#define SYS_KERN_HANDLE_CLOSE 0x01u #define SYS_KERN_HANDLE_CLOSE 2
#define SYS_KERN_HANDLE_DUPLICATE 0x02u #define SYS_KERN_HANDLE_DUPLICATE 3
#define SYS_KERN_CONFIG_GET 0x03u #define SYS_KERN_CONFIG_GET 4
#define SYS_KERN_CONFIG_SET 0x04u #define SYS_KERN_CONFIG_SET 5
#define SYS_KERN_OBJECT_WAIT 0x05u #define SYS_KERN_OBJECT_WAIT 6
#define SYS_KERN_OBJECT_WAIT_ASYNC 0x06u #define SYS_KERN_OBJECT_WAIT_ASYNC 7
#define SYS_TASK_EXIT 0x07u #define SYS_TASK_EXIT 8
#define SYS_TASK_SELF 0x08u #define SYS_TASK_SELF 9
#define SYS_TASK_CREATE 0x09u #define SYS_TASK_CREATE 10
#define SYS_TASK_CREATE_THREAD 0x0Au #define SYS_TASK_CREATE_THREAD 11
#define SYS_TASK_GET_ADDRESS_SPACE 0x0Bu #define SYS_TASK_GET_ADDRESS_SPACE 12
#define SYS_TASK_CONFIG_GET 0x2Au #define SYS_TASK_CONFIG_GET 13
#define SYS_TASK_CONFIG_SET 0x2Bu #define SYS_TASK_CONFIG_SET 14
#define SYS_THREAD_START 0x0Cu #define SYS_THREAD_SELF 15
#define SYS_THREAD_EXIT 0x2Eu #define SYS_THREAD_START 16
#define SYS_THREAD_CONFIG_GET 0x2Cu #define SYS_THREAD_EXIT 17
#define SYS_THREAD_CONFIG_SET 0x2Du #define SYS_THREAD_CONFIG_GET 18
#define SYS_VM_OBJECT_CREATE 0x0Du #define SYS_THREAD_CONFIG_SET 19
#define SYS_VM_OBJECT_READ 0x0Eu #define SYS_VM_OBJECT_CREATE 20
#define SYS_VM_OBJECT_WRITE 0x0Fu #define SYS_VM_OBJECT_READ 21
#define SYS_VM_OBJECT_COPY 0x10u #define SYS_VM_OBJECT_WRITE 22
#define SYS_ADDRESS_SPACE_READ 0x11u #define SYS_VM_OBJECT_COPY 23
#define SYS_ADDRESS_SPACE_WRITE 0x12u #define SYS_ADDRESS_SPACE_READ 24
#define SYS_ADDRESS_SPACE_MAP 0x13u #define SYS_ADDRESS_SPACE_WRITE 25
#define SYS_ADDRESS_SPACE_UNMAP 0x14u #define SYS_ADDRESS_SPACE_MAP 26
#define SYS_ADDRESS_SPACE_RESERVE 0x15u #define SYS_ADDRESS_SPACE_UNMAP 27
#define SYS_ADDRESS_SPACE_RELEASE 0x16u #define SYS_ADDRESS_SPACE_RESERVE 28
#define SYS_MSG_SEND 0x17u #define SYS_ADDRESS_SPACE_RELEASE 29
#define SYS_MSG_RECV 0x18u #define SYS_MSG_SEND 30
#define SYS_MSG_REPLY 0x19u #define SYS_MSG_RECV 31
#define SYS_MSG_READ 0x1Au #define SYS_MSG_REPLY 32
#define SYS_MSG_WRITE 0x1Bu #define SYS_MSG_READ 33
#define SYS_CHANNEL_CREATE 0x1Cu #define SYS_MSG_WRITE 34
#define SYS_PORT_CREATE 0x1Du #define SYS_CHANNEL_CREATE 35
#define SYS_PORT_CONNECT 0x1Eu #define SYS_PORT_CREATE 36
#define SYS_PORT_DISCONNECT 0x1Fu #define SYS_PORT_CONNECT 37
#define SYS_EQUEUE_CREATE 0x20u #define SYS_PORT_DISCONNECT 38
#define SYS_EQUEUE_DEQUEUE 0x21u #define SYS_EQUEUE_CREATE 39
#define SYS_VM_CONTROLLER_CREATE 0x22u #define SYS_EQUEUE_DEQUEUE 40
#define SYS_VM_CONTROLLER_RECV 0x23u #define SYS_VM_CONTROLLER_CREATE 41
#define SYS_VM_CONTROLLER_RECV_ASYNC 0x24u #define SYS_VM_CONTROLLER_RECV 42
#define SYS_VM_CONTROLLER_CREATE_OBJECT 0x25u #define SYS_VM_CONTROLLER_RECV_ASYNC 43
#define SYS_VM_CONTROLLER_DETACH_OBJECT 0x26u #define SYS_VM_CONTROLLER_CREATE_OBJECT 44
#define SYS_VM_CONTROLLER_SUPPLY_PAGES 0x27u #define SYS_VM_CONTROLLER_DETACH_OBJECT 45
#define SYS_FUTEX_WAIT 0x28u #define SYS_VM_CONTROLLER_SUPPLY_PAGES 46
#define SYS_FUTEX_WAKE 0x29u #define SYS_FUTEX_WAIT 47
#define SYS_FUTEX_WAKE 48
#endif #endif

View File

@@ -44,6 +44,16 @@
* kern_object_wait */ * kern_object_wait */
#define KERN_WAIT_MAX_ITEMS 64 #define KERN_WAIT_MAX_ITEMS 64
/* message types */
#define KERN_MSG_TYPE_NONE 0
#define KERN_MSG_TYPE_DATA 1
#define KERN_MSG_TYPE_EVENT 2
/* event message types */
#define KERN_MSG_EVENT_NONE 0
#define KERN_MSG_EVENT_CONNECTION 1
#define KERN_MSG_EVENT_DISCONNECTION 2
/* equeue packet types */ /* equeue packet types */
#define EQUEUE_PKT_PAGE_REQUEST 0x01u #define EQUEUE_PKT_PAGE_REQUEST 0x01u
#define EQUEUE_PKT_ASYNC_SIGNAL 0x02u #define EQUEUE_PKT_ASYNC_SIGNAL 0x02u
@@ -92,6 +102,8 @@ typedef uint32_t kern_config_key_t;
typedef uint32_t vm_prot_t; typedef uint32_t vm_prot_t;
typedef int64_t ssize_t; typedef int64_t ssize_t;
typedef uint32_t kern_futex_t; typedef uint32_t kern_futex_t;
typedef uint32_t kern_msg_type_t;
typedef uint32_t kern_msg_event_type_t;
typedef unsigned short equeue_packet_type_t; typedef unsigned short equeue_packet_type_t;
@@ -122,14 +134,27 @@ typedef struct {
tid_t msg_sender; tid_t msg_sender;
/* the id of the port or channel used to send a particular message. */ /* the id of the port or channel used to send a particular message. */
koid_t msg_endpoint; koid_t msg_endpoint;
/* a list of iovecs that point to the buffers that make up the main /* the message type */
* message data. */ kern_msg_type_t msg_type;
union {
/* msg_type = KERN_MSG_TYPE_DATA */
struct {
/* a list of iovecs that point to the buffers that make
* up the main message data. */
kern_iovec_t *msg_data; kern_iovec_t *msg_data;
size_t msg_data_count; size_t msg_data_count;
/* a list of handle entries that contain the kernel handles included /* a list of handle entries that contain the kernel
* in a message. */ * handles included in a message. */
kern_msg_handle_t *msg_handles; kern_msg_handle_t *msg_handles;
size_t msg_handles_count; size_t msg_handles_count;
};
/* msg_type = KERN_MSG_TYPE_EVENT */
struct {
kern_msg_event_type_t msg_event;
};
};
} kern_msg_t; } kern_msg_t;
typedef struct { typedef struct {

View File

@@ -330,15 +330,14 @@ void task_exit(int status)
task_unlock(self); task_unlock(self);
handle_table_destroy(handles); handle_table_destroy(handles);
printk("thread %s[%u.%u] killed", tracek("thread %s[%u.%u] killed",
self->t_name, self->t_name,
self->t_id, self->t_id,
cur_thread->tr_id); cur_thread->tr_id);
printk("task %s[%u] killed (%u, %u)", tracek("task %s[%u] killed (%u)",
self->t_name, self->t_name,
self->t_id, self->t_id,
self->t_base.ob_refcount, self->t_base.ob_refcount);
self->t_base.ob_handles);
spin_unlock_irqrestore(handles_lock, flags); spin_unlock_irqrestore(handles_lock, flags);
while (1) { while (1) {
@@ -359,7 +358,7 @@ kern_status_t task_open_handle(
return status; return status;
} }
object_add_handle(obj); object_ref(obj);
handle_data->h_object = obj; handle_data->h_object = obj;
handle_data->h_flags = flags; handle_data->h_flags = flags;

View File

@@ -148,7 +148,7 @@ void thread_exit(void)
thread_lock_irqsave(self, &flags); thread_lock_irqsave(self, &flags);
self->tr_state = THREAD_STOPPED; self->tr_state = THREAD_STOPPED;
object_assert_signal(&self->tr_base, THREAD_SIGNAL_STOPPED); object_assert_signal(&self->tr_base, THREAD_SIGNAL_STOPPED);
printk("thread %s[%u.%u] exited", tracek("thread %s[%u.%u] exited",
self->tr_parent->t_name, self->tr_parent->t_name,
self->tr_parent->t_id, self->tr_parent->t_id,
self->tr_id); self->tr_id);
@@ -184,7 +184,7 @@ void thread_kill(struct thread *thread)
} }
object_assert_signal(&thread->tr_base, THREAD_SIGNAL_STOPPED); object_assert_signal(&thread->tr_base, THREAD_SIGNAL_STOPPED);
printk("thread %s[%u.%u] killed", tracek("thread %s[%u.%u] killed",
thread->tr_parent->t_name, thread->tr_parent->t_name,
thread->tr_parent->t_id, thread->tr_parent->t_id,
thread->tr_id); thread->tr_id);

View File

@@ -10,6 +10,7 @@ static const virt_addr_t syscall_table[] = {
SYSCALL_TABLE_ENTRY(TASK_CREATE, task_create), SYSCALL_TABLE_ENTRY(TASK_CREATE, task_create),
SYSCALL_TABLE_ENTRY(TASK_CREATE_THREAD, task_create_thread), SYSCALL_TABLE_ENTRY(TASK_CREATE_THREAD, task_create_thread),
SYSCALL_TABLE_ENTRY(TASK_GET_ADDRESS_SPACE, task_get_address_space), SYSCALL_TABLE_ENTRY(TASK_GET_ADDRESS_SPACE, task_get_address_space),
SYSCALL_TABLE_ENTRY(THREAD_SELF, thread_self),
SYSCALL_TABLE_ENTRY(THREAD_START, thread_start), SYSCALL_TABLE_ENTRY(THREAD_START, thread_start),
SYSCALL_TABLE_ENTRY(THREAD_EXIT, thread_exit), SYSCALL_TABLE_ENTRY(THREAD_EXIT, thread_exit),
SYSCALL_TABLE_ENTRY(THREAD_CONFIG_GET, thread_config_get), SYSCALL_TABLE_ENTRY(THREAD_CONFIG_GET, thread_config_get),

View File

@@ -60,9 +60,9 @@ kern_status_t sys_port_create(kern_handle_t *out)
kern_status_t status kern_status_t status
= task_open_handle(self, &port->p_base, 0, &handle); = task_open_handle(self, &port->p_base, 0, &handle);
task_unlock_irqrestore(self, irq_flags); task_unlock_irqrestore(self, irq_flags);
object_unref(&port->p_base);
if (status != KERN_OK) { if (status != KERN_OK) {
object_unref(&port->p_base);
return status; return status;
} }
@@ -114,6 +114,7 @@ kern_status_t sys_port_connect(
status = port_connect(port, remote); status = port_connect(port, remote);
port_unlock_irqrestore(port, flags); port_unlock_irqrestore(port, flags);
object_unref(&remote->c_base); object_unref(&remote->c_base);
object_unref(port_obj);
return KERN_OK; return KERN_OK;
} }

View File

@@ -8,8 +8,10 @@
extern kern_status_t sys_task_exit(int status) extern kern_status_t sys_task_exit(int status)
{ {
#if defined(TRACE)
struct task *self = current_task(); struct task *self = current_task();
printk("%s[%d]: task_exit(%d)", self->t_name, self->t_id, status); printk("%s[%d]: task_exit(%d)", self->t_name, self->t_id, status);
#endif
task_exit(status); task_exit(status);
return KERN_FATAL_ERROR; return KERN_FATAL_ERROR;
} }
@@ -36,7 +38,7 @@ kern_status_t sys_task_self(kern_handle_t *out)
return status; return status;
} }
object_add_handle(&self->t_base); object_ref(&self->t_base);
handle_slot->h_object = &self->t_base; handle_slot->h_object = &self->t_base;
*out = handle; *out = handle;
@@ -127,8 +129,8 @@ kern_status_t sys_task_create(
child_handle_slot->h_object = &child->t_base; child_handle_slot->h_object = &child->t_base;
space_handle_slot->h_object = &child->t_address_space->s_base; space_handle_slot->h_object = &child->t_address_space->s_base;
object_add_handle(&child->t_base); object_ref(&child->t_base);
object_add_handle(&child->t_address_space->s_base); object_ref(&child->t_address_space->s_base);
object_unref(parent_obj); object_unref(parent_obj);
@@ -197,7 +199,7 @@ kern_status_t sys_task_create_thread(
thread_init_user(thread, ip, sp, args, nr_args); thread_init_user(thread, ip, sp, args, nr_args);
target_handle->h_object = &thread->tr_base; target_handle->h_object = &thread->tr_base;
object_add_handle(&thread->tr_base); object_ref(&thread->tr_base);
task_unlock_irqrestore(target, flags); task_unlock_irqrestore(target, flags);
object_unref(target_obj); object_unref(target_obj);
@@ -252,7 +254,7 @@ kern_status_t sys_task_get_address_space(
} }
handle_slot->h_object = &task->t_address_space->s_base; handle_slot->h_object = &task->t_address_space->s_base;
object_add_handle(&task->t_address_space->s_base); object_ref(&task->t_address_space->s_base);
task_unlock_irqrestore(self, flags); task_unlock_irqrestore(self, flags);
object_unref(task_obj); object_unref(task_obj);
@@ -260,6 +262,37 @@ kern_status_t sys_task_get_address_space(
return KERN_OK; return KERN_OK;
} }
kern_status_t sys_thread_self(kern_handle_t *out)
{
struct task *self = current_task();
if (!validate_access_w(self, out, sizeof *out)) {
return KERN_MEMORY_FAULT;
}
struct thread *self_thread = current_thread();
unsigned long flags;
task_lock_irqsave(self, &flags);
struct handle *handle_slot = NULL;
kern_handle_t handle;
kern_status_t status = handle_table_alloc_handle(
self->t_handles,
&handle_slot,
&handle);
task_unlock_irqrestore(self, flags);
if (status != KERN_OK) {
return status;
}
object_ref(&self_thread->tr_base);
handle_slot->h_object = &self_thread->tr_base;
*out = handle;
return KERN_OK;
}
kern_status_t sys_thread_start(kern_handle_t thread_handle) kern_status_t sys_thread_start(kern_handle_t thread_handle)
{ {
unsigned long flags; unsigned long flags;

View File

@@ -186,8 +186,6 @@ kern_status_t sys_vm_controller_create_object(
} }
out_slot->h_object = &out_vmo->vo_base; out_slot->h_object = &out_vmo->vo_base;
object_add_handle(&out_vmo->vo_base);
object_unref(&out_vmo->vo_base);
*out = out_handle; *out = out_handle;
return KERN_OK; return KERN_OK;

View File

@@ -4,7 +4,10 @@
#include <kernel/object.h> #include <kernel/object.h>
#include <kernel/panic.h> #include <kernel/panic.h>
#include <kernel/printk.h> #include <kernel/printk.h>
#include <kernel/sched.h>
#include <kernel/task.h>
#include <kernel/util.h> #include <kernel/util.h>
#include <kernel/vm-controller.h>
#include <kernel/vm-object.h> #include <kernel/vm-object.h>
#include <mango/status.h> #include <mango/status.h>
@@ -45,7 +48,7 @@ enum search_direction {
SEARCH_RIGHT, SEARCH_RIGHT,
}; };
static kern_status_t address_space_cleanup(struct object *obj, struct queue *q); static kern_status_t address_space_cleanup(struct object *obj);
static struct object_type address_space_type = { static struct object_type address_space_type = {
.ob_name = "address-space", .ob_name = "address-space",
@@ -139,16 +142,16 @@ static struct vm_area *get_entry(
if (address < child->vma_base) { if (address < child->vma_base) {
next = btree_left(cur); next = btree_left(cur);
if (LEFT_DIFF(address, child)
< LEFT_DIFF(address, closest_left)) {
closest_left = child;
}
} else if (address > child->vma_limit) {
next = btree_right(cur);
if (RIGHT_DIFF(address, child) if (RIGHT_DIFF(address, child)
< RIGHT_DIFF(address, closest_right)) { < RIGHT_DIFF(address, closest_right)) {
closest_right = child; closest_right = child;
} }
} else if (address > child->vma_limit) {
next = btree_right(cur);
if (LEFT_DIFF(address, child)
< LEFT_DIFF(address, closest_left)) {
closest_left = child;
}
} else { } else {
result = child; result = child;
break; break;
@@ -662,9 +665,10 @@ static void area_unmap(struct vm_area *area)
} }
} }
static kern_status_t address_space_cleanup(struct object *obj, struct queue *q) static kern_status_t address_space_cleanup(struct object *obj)
{ {
struct address_space *space = ADDRESS_SPACE_CAST(obj); struct address_space *space = ADDRESS_SPACE_CAST(obj);
tracek("begin address space cleanup %p", space);
struct btree_node *cur = btree_first(&space->s_mappings); struct btree_node *cur = btree_first(&space->s_mappings);
while (cur) { while (cur) {
struct btree_node *next = btree_next(cur); struct btree_node *next = btree_next(cur);
@@ -678,6 +682,20 @@ static kern_status_t address_space_cleanup(struct object *obj, struct queue *q)
cur = next; cur = next;
} }
cur = btree_first(&space->s_reserved);
while (cur) {
struct btree_node *next = btree_next(cur);
struct vm_area *area
= BTREE_CONTAINER(struct vm_area, vma_node, cur);
btree_delete(&space->s_reserved, cur);
delete_area(area, space);
vm_cache_free(&vm_area_cache, area);
cur = next;
}
tracek("end address space cleanup %p", space);
return KERN_OK; return KERN_OK;
} }
@@ -704,6 +722,9 @@ kern_status_t address_space_map(
} }
tracek("address_space_map(%zx, %zx)", map_address, length); tracek("address_space_map(%zx, %zx)", map_address, length);
if (map_address == 0xc6a55000) {
printk("break");
}
if (!root || !object) { if (!root || !object) {
tracek("null pointer"); tracek("null pointer");
@@ -821,7 +842,7 @@ static kern_status_t split_area(
put_entry(&root->s_mappings, right); put_entry(&root->s_mappings, right);
for (size_t i = unmap_base; i < unmap_limit; i += VM_PAGE_SIZE) { for (size_t i = unmap_base; i < unmap_limit; i += VM_PAGE_SIZE) {
tracek("unmapping %zx", i); tracek("pmap_remove %zx", i);
pmap_remove(root->s_pmap, i); pmap_remove(root->s_pmap, i);
} }
@@ -858,7 +879,7 @@ static kern_status_t left_reduce_area(
return KERN_OK; return KERN_OK;
} }
tracek(" unmapping %zx-%zx (%zx bytes)", base, base + length, length); tracek(" pmap_remove %zx-%zx (%zx bytes)", base, base + length, length);
for (size_t i = base; i < limit; i += VM_PAGE_SIZE) { for (size_t i = base; i < limit; i += VM_PAGE_SIZE) {
pmap_remove(root->s_pmap, i); pmap_remove(root->s_pmap, i);
} }
@@ -895,7 +916,7 @@ static kern_status_t right_reduce_area(
return KERN_OK; return KERN_OK;
} }
tracek(" unmapping %zx-%zx (%zx bytes)", base, base + length, length); tracek(" pmap_remove %zx-%zx (%zx bytes)", base, base + length, length);
for (size_t i = base; i < limit; i += VM_PAGE_SIZE) { for (size_t i = base; i < limit; i += VM_PAGE_SIZE) {
pmap_remove(root->s_pmap, i); pmap_remove(root->s_pmap, i);
} }
@@ -913,9 +934,10 @@ static kern_status_t delete_area(
return KERN_OK; return KERN_OK;
} }
tracek("delete mapping [%zx-%zx]", tracek("delete mapping [%zx-%zx] (%zx bytes)",
mapping->vma_base, mapping->vma_base,
mapping->vma_limit); mapping->vma_limit,
mapping->vma_limit - mapping->vma_base);
for (size_t i = mapping->vma_base; i < mapping->vma_limit; for (size_t i = mapping->vma_base; i < mapping->vma_limit;
i += VM_PAGE_SIZE) { i += VM_PAGE_SIZE) {
@@ -929,7 +951,23 @@ static kern_status_t delete_area(
&mapping->vma_object->vo_mappings, &mapping->vma_object->vo_mappings,
&mapping->vma_object_entry); &mapping->vma_object_entry);
mapping->vma_object = NULL; mapping->vma_object = NULL;
/* if the object is attached to a controller and the ref-count is 2,
* then the only other remaining reference to this object is held by
* the controller. */
struct vm_controller *ctrl = object->vo_ctrl;
bool detach = ctrl != NULL && object->vo_base.ob_refcount == 2
&& (object->vo_flags & VMO_AUTO_DETACH);
vm_object_unlock_irqrestore(object, flags); vm_object_unlock_irqrestore(object, flags);
if (detach) {
/* TODO find a better way to achieve this, and/or give the
* server that created the object more control over when it
* should be detached */
vm_controller_lock_irqsave(ctrl, &flags);
vm_controller_detach_object(ctrl, object);
vm_controller_unlock_irqrestore(ctrl, flags);
}
object_unref(&object->vo_base); object_unref(&object->vo_base);
/* don't actually delete the mapping yet. that will be done by /* don't actually delete the mapping yet. that will be done by
@@ -978,9 +1016,11 @@ kern_status_t address_space_unmap(
= (area_base <= unmap_base = (area_base <= unmap_base
&& area_limit >= unmap_limit); && area_limit >= unmap_limit);
bool left_reduce bool left_reduce
= (unmap_base <= area_base && unmap_limit < area_limit); = (unmap_base <= area_base && unmap_limit > area_base
&& unmap_limit < area_limit);
bool right_reduce bool right_reduce
= (unmap_base > area_base && unmap_limit >= area_limit); = (unmap_base > area_base && unmap_base < area_limit
&& unmap_limit >= area_limit);
if (split) { if (split) {
status = split_area( status = split_area(
@@ -1118,9 +1158,10 @@ kern_status_t address_space_release(
&& area_limit >= release_limit); && area_limit >= release_limit);
bool left_reduce bool left_reduce
= (release_base <= area_base = (release_base <= area_base
&& release_limit > area_base
&& release_limit < area_limit); && release_limit < area_limit);
bool right_reduce bool right_reduce
= (release_base > area_base = (release_base > area_base && release_base < area_limit
&& release_limit >= area_limit); && release_limit >= area_limit);
if (split) { if (split) {
@@ -1202,7 +1243,7 @@ bool address_space_validate_access(
return false; return false;
} }
i = area->vma_limit; i = area->vma_limit + 1;
} }
return true; return true;
@@ -1297,7 +1338,14 @@ kern_status_t address_space_demand_map(
object_offset, object_offset,
VMO_ALLOCATE_MISSING_PAGE, VMO_ALLOCATE_MISSING_PAGE,
NULL); NULL);
// tracek("vm: mapping %07llx -> %10llx", vm_page_get_paddr(pg), addr); #if 0
struct task *self = current_task();
printk("vm: %s[%d] mapping %07llx -> %10llx",
self->t_name,
self->t_id,
vm_page_get_paddr(pg),
addr);
#endif
if (!pg) { if (!pg) {
return KERN_FATAL_ERROR; return KERN_FATAL_ERROR;

View File

@@ -23,8 +23,14 @@ static struct object_type vm_controller_type = {
.ob_header_offset = offsetof(struct vm_controller, vc_base), .ob_header_offset = offsetof(struct vm_controller, vc_base),
}; };
static struct vm_cache page_request_cache = {
.c_name = "page-request",
.c_obj_size = sizeof(struct page_request),
};
kern_status_t vm_controller_type_init(void) kern_status_t vm_controller_type_init(void)
{ {
vm_cache_init(&page_request_cache);
return object_type_register(&vm_controller_type); return object_type_register(&vm_controller_type);
} }
@@ -52,10 +58,17 @@ static struct page_request *get_next_request(struct vm_controller *ctrl)
struct page_request *req struct page_request *req
= BTREE_CONTAINER(struct page_request, req_node, cur); = BTREE_CONTAINER(struct page_request, req_node, cur);
spin_lock(&req->req_lock); spin_lock(&req->req_lock);
if (req->req_status == PAGE_REQUEST_PENDING) { switch (req->req_status) {
case PAGE_REQUEST_PENDING:
req->req_status = PAGE_REQUEST_IN_PROGRESS; req->req_status = PAGE_REQUEST_IN_PROGRESS;
ctrl->vc_requests_waiting--; ctrl->vc_requests_waiting--;
return req; return req;
case PAGE_REQUEST_ASYNC:
btree_delete(&ctrl->vc_requests, &req->req_node);
ctrl->vc_requests_waiting--;
return req;
default:
break;
} }
spin_unlock(&req->req_lock); spin_unlock(&req->req_lock);
@@ -65,98 +78,6 @@ static struct page_request *get_next_request(struct vm_controller *ctrl)
return NULL; return NULL;
} }
kern_status_t vm_controller_recv(
struct vm_controller *ctrl,
equeue_packet_page_request_t *out)
{
struct page_request *req = NULL;
req = get_next_request(ctrl);
if (!req) {
return KERN_NO_ENTRY;
}
if (ctrl->vc_requests_waiting == 0) {
object_clear_signal(
&ctrl->vc_base,
VM_CONTROLLER_SIGNAL_REQUEST_RECEIVED);
}
out->req_vmo = req->req_object->vo_key;
out->req_type = req->req_type;
out->req_offset = req->req_offset;
out->req_length = req->req_length;
spin_unlock(&req->req_lock);
return KERN_OK;
}
kern_status_t vm_controller_recv_async(
struct vm_controller *ctrl,
struct equeue *eq,
equeue_key_t key)
{
if (ctrl->vc_eq) {
object_unref(&ctrl->vc_eq->eq_base);
}
object_ref(&eq->eq_base);
ctrl->vc_eq = eq;
ctrl->vc_eq_key = key;
return KERN_OK;
}
kern_status_t vm_controller_create_object(
struct vm_controller *ctrl,
const char *name,
size_t name_len,
equeue_key_t key,
size_t data_len,
vm_prot_t prot,
struct vm_object **out)
{
struct vm_object *vmo = get_object(&ctrl->vc_objects, key);
if (vmo) {
return KERN_NAME_EXISTS;
}
vmo = vm_object_create(name, name_len, data_len, prot);
if (!vmo) {
return KERN_NO_MEMORY;
}
object_ref(&ctrl->vc_base);
object_ref(&vmo->vo_base);
vmo->vo_flags |= VMO_CONTROLLER;
vmo->vo_ctrl = ctrl;
vmo->vo_key = key;
put_object(&ctrl->vc_objects, vmo);
*out = vmo;
return KERN_OK;
}
kern_status_t vm_controller_detach_object(
struct vm_controller *ctrl,
struct vm_object *vmo)
{
if (vmo->vo_ctrl != ctrl) {
return KERN_INVALID_ARGUMENT;
}
vmo->vo_ctrl = NULL;
vmo->vo_key = 0;
btree_delete(&ctrl->vc_objects, &vmo->vo_ctrl_node);
object_unref(&ctrl->vc_base);
object_unref(&vmo->vo_base);
return KERN_OK;
}
static kern_status_t try_enqueue(struct btree *tree, struct page_request *req) static kern_status_t try_enqueue(struct btree *tree, struct page_request *req)
{ {
if (!tree->b_root) { if (!tree->b_root) {
@@ -196,6 +117,119 @@ static kern_status_t try_enqueue(struct btree *tree, struct page_request *req)
return true; return true;
} }
static kern_status_t send_request_async(
struct vm_controller *ctrl,
struct page_request *req)
{
fill_random(&req->req_id, sizeof req->req_id);
while (!try_enqueue(&ctrl->vc_requests, req)) {
req->req_id++;
}
ctrl->vc_requests_waiting++;
object_assert_signal(
&ctrl->vc_base,
VM_CONTROLLER_SIGNAL_REQUEST_RECEIVED);
return KERN_OK;
}
kern_status_t vm_controller_recv(
struct vm_controller *ctrl,
equeue_packet_page_request_t *out)
{
struct page_request *req = NULL;
req = get_next_request(ctrl);
if (!req) {
return KERN_NO_ENTRY;
}
if (ctrl->vc_requests_waiting == 0) {
object_clear_signal(
&ctrl->vc_base,
VM_CONTROLLER_SIGNAL_REQUEST_RECEIVED);
}
out->req_vmo = req->req_object;
out->req_type = req->req_type;
out->req_offset = req->req_offset;
out->req_length = req->req_length;
spin_unlock(&req->req_lock);
if (req->req_status == PAGE_REQUEST_ASYNC) {
vm_cache_free(&page_request_cache, req);
}
return KERN_OK;
}
kern_status_t vm_controller_recv_async(
struct vm_controller *ctrl,
struct equeue *eq,
equeue_key_t key)
{
if (ctrl->vc_eq) {
object_unref(&ctrl->vc_eq->eq_base);
}
object_ref(&eq->eq_base);
ctrl->vc_eq = eq;
ctrl->vc_eq_key = key;
return KERN_OK;
}
kern_status_t vm_controller_create_object(
struct vm_controller *ctrl,
const char *name,
size_t name_len,
equeue_key_t key,
size_t data_len,
vm_prot_t prot,
struct vm_object **out)
{
struct vm_object *vmo
= vm_object_create(name, name_len, data_len, prot);
if (!vmo) {
return KERN_NO_MEMORY;
}
object_ref(&ctrl->vc_base);
/* TODO expose the VMO_AUTO_DETACH flag to userspace */
vmo->vo_flags |= VMO_CONTROLLER | VMO_AUTO_DETACH;
vmo->vo_ctrl = ctrl;
vmo->vo_key = key;
*out = vmo;
return KERN_OK;
}
kern_status_t vm_controller_detach_object(
struct vm_controller *ctrl,
struct vm_object *vmo)
{
if (vmo->vo_ctrl != ctrl) {
return KERN_INVALID_ARGUMENT;
}
struct page_request *req
= vm_cache_alloc(&page_request_cache, VM_NORMAL);
req->req_type = PAGE_REQUEST_DETACH;
req->req_status = PAGE_REQUEST_ASYNC;
req->req_object = vmo->vo_key;
req->req_sender = current_thread();
send_request_async(ctrl, req);
vmo->vo_ctrl = NULL;
vmo->vo_key = 0;
object_unref(&ctrl->vc_base);
return KERN_OK;
}
static void wait_for_reply( static void wait_for_reply(
struct vm_controller *ctrl, struct vm_controller *ctrl,
struct page_request *req, struct page_request *req,
@@ -221,7 +255,7 @@ static void wait_for_reply(
static void fulfill_requests( static void fulfill_requests(
struct vm_controller *ctrl, struct vm_controller *ctrl,
struct vm_object *obj, equeue_key_t object,
off_t offset, off_t offset,
size_t length, size_t length,
kern_status_t result) kern_status_t result)
@@ -242,7 +276,7 @@ static void fulfill_requests(
match = true; match = true;
} }
if (req->req_object != obj) { if (req->req_object != object) {
match = false; match = false;
} }
@@ -280,7 +314,7 @@ kern_status_t vm_controller_supply_pages(
src_offset, src_offset,
count, count,
NULL); NULL);
fulfill_requests(ctrl, dst, dst_offset, count, status); fulfill_requests(ctrl, dst->vo_key, dst_offset, count, status);
return status; return status;
} }

View File

@@ -15,7 +15,7 @@
(p) += VM_PAGE_SIZE; \ (p) += VM_PAGE_SIZE; \
} }
static kern_status_t vm_object_cleanup(struct object *obj, struct queue *q) static kern_status_t vm_object_cleanup(struct object *obj)
{ {
struct vm_object *vmo = vm_object_cast(obj); struct vm_object *vmo = vm_object_cast(obj);
struct btree_node *cur = btree_first(&vmo->vo_pages); struct btree_node *cur = btree_first(&vmo->vo_pages);
@@ -29,6 +29,13 @@ static kern_status_t vm_object_cleanup(struct object *obj, struct queue *q)
cur = next; cur = next;
} }
if (vmo->vo_ctrl) {
unsigned long flags;
vm_controller_lock_irqsave(vmo->vo_ctrl, &flags);
vm_controller_detach_object(vmo->vo_ctrl, vmo);
vm_controller_unlock_irqrestore(vmo->vo_ctrl, flags);
}
return KERN_OK; return KERN_OK;
} }
@@ -281,10 +288,12 @@ static struct vm_page *alloc_page(struct vm_object *vo, off_t offset)
void *page_buf = vm_page_get_vaddr(page); void *page_buf = vm_page_get_vaddr(page);
memset(page_buf, 0x0, vm_page_get_size_bytes(page)); memset(page_buf, 0x0, vm_page_get_size_bytes(page));
tracek("vm-object: [%s] alloc offset %zx -> page %zx", #if 0
printk("vm-object: [%s] alloc offset %zx -> page %zx",
vo->vo_name, vo->vo_name,
offset, offset,
vm_page_get_paddr(page)); vm_page_get_paddr(page));
#endif
page->p_vmo_offset = offset; page->p_vmo_offset = offset;
vo->vo_pages.b_root = &page->p_bnode; vo->vo_pages.b_root = &page->p_bnode;
btree_insert_fixup(&vo->vo_pages, &page->p_bnode); btree_insert_fixup(&vo->vo_pages, &page->p_bnode);
@@ -364,12 +373,13 @@ static kern_status_t request_page(
struct vm_controller *ctrl = vo->vo_ctrl; struct vm_controller *ctrl = vo->vo_ctrl;
struct page_request req = {0}; struct page_request req = {0};
req.req_status = PAGE_REQUEST_PENDING; req.req_status = PAGE_REQUEST_PENDING;
req.req_type = PAGE_REQUEST_READ;
req.req_offset = offset; req.req_offset = offset;
req.req_length = vm_page_order_to_bytes(VM_PAGE_4K); req.req_length = vm_page_order_to_bytes(VM_PAGE_4K);
req.req_sender = current_thread(); req.req_sender = current_thread();
object_ref(&vo->vo_base); object_ref(&vo->vo_base);
req.req_object = vo; req.req_object = vo->vo_key;
vm_object_unlock_irqrestore(vo, *irq_flags); vm_object_unlock_irqrestore(vo, *irq_flags);
vm_controller_lock_irqsave(ctrl, irq_flags); vm_controller_lock_irqsave(ctrl, irq_flags);
@@ -392,6 +402,7 @@ struct vm_page *vm_object_get_page(
enum vm_object_flags flags, enum vm_object_flags flags,
unsigned long *irq_flags) unsigned long *irq_flags)
{ {
offset &= ~VM_PAGE_MASK;
if (!vo->vo_ctrl && (flags & VMO_ALLOCATE_MISSING_PAGE)) { if (!vo->vo_ctrl && (flags & VMO_ALLOCATE_MISSING_PAGE)) {
return alloc_page(vo, offset); return alloc_page(vo, offset);
} }