rewrite IPC to use virtual addresses (vaddr) instead of physical addresses (paddr)

This commit is contained in:
2026-02-28 23:15:41 +01:00
parent 770e77769a
commit 11c01ab7f6
3 changed files with 46 additions and 41 deletions

View File

@@ -132,9 +132,8 @@ namespace Syscall {
}
// NOTE(review): this is a rendered diff hunk WITHOUT +/- markers, so the removed
// and the added unmap() line of `exit` both appear below -- the text is neither
// revision verbatim. Per the commit, the unmapped region changed from 0x4000 to
// 0x4000000; confirm against the repo whether unmap runs before or after
// scheduler.exit() (if exit() never returns, an unmap after it is dead code).
void exit(Vault &vault) {
unmap(vault, (void*) 0x4000, 512 );
vault.scheduler.exit();
unmap(vault, (void*) 0x4000000, 512 );
}
void kill(Vault &vault, size_t pid){
@@ -175,24 +174,27 @@ namespace Syscall {
}
// NOTE(review): rendered diff hunk WITHOUT +/- markers -- the old and new
// revisions of copy_from_phys are interleaved below (two signatures, two
// allocate/check pairs, two unmap sites); the text is not compilable as-is.
// Contract (new revision): temporarily map the physical range
// [src_paddr, src_paddr + size) into the current thread's address space,
// memcpy it to dest_vaddr, tear the temporary mapping down, return true.
// Returns false if no free virtual window could be found.
bool copy_from_phys(Vault& vault, uintptr_t phys_ptr, void* virt_ptr, size_t size) {
size_t offset = Page::offset(phys_ptr);
bool copy_from_phys(Vault& vault, uintptr_t src_paddr, void* dest_vaddr, size_t size) {
// Sub-page offset of the source; the window must cover size + offset bytes.
size_t offset = Page::offset(src_paddr);
size_t total_size = size + offset;
four_lvl_paging_t* search_table = vault.scheduler.active()->paging_tree;
void* virt_addr = getFreeVirtSpace(search_table->l4, (total_size/4096)+1); // page aligned pointer
if (virt_addr == nullptr) {
uintptr_t src_vaddr = (uintptr_t)getFreeVirtSpace(search_table->l4, (total_size/4096)+1); // page aligned pointer
if (src_vaddr == 0) {
return false;
}
// NOTE(review): adding `offset` here means every setMapping() below receives a
// non-page-aligned vaddr -- confirm setMapping masks the low 12 bits; otherwise
// map the page-aligned base and only read from base + offset.
src_vaddr += offset;
// NOTE(review): `uint8_t i` wraps at 256, so a copy of >= 256 pages (~1 MiB)
// never terminates -- TODO widen the counter. Also the bound uses `size`,
// while the window was sized from `total_size`; when offset pushes the range
// across a page boundary this maps one page too few.
for(uint8_t i =0; i<size/4096 +1; i++){
//TODO actually use loop
setMapping((uintptr_t)virt_addr, (void*)phys_ptr, vault.scheduler.active()->paging_tree->l4);
setMapping(src_vaddr+(uintptr_t)(i*4096), (void*)(src_paddr+(uintptr_t)(i*4096)), search_table->l4);
}
memcpy(dest_vaddr, (void*)src_vaddr, size);
// NOTE(review): the unmap below never uses `i` -- every iteration clears the
// same first page, so pages 2..n of the temporary window stay mapped (virtual
// address space leak). Should be setMapping(src_vaddr + i*4096, 0, ...).
for(uint8_t i =0; i<size/4096 +1; i++){
setMapping((uintptr_t)(src_vaddr), 0, search_table->l4);
}
memcpy(virt_ptr, (void*)((uintptr_t)(virt_addr)+ (uintptr_t)offset), size);
setMapping((uintptr_t)(virt_addr), 0, search_table->l4);
return true;
}
@@ -202,16 +204,15 @@ bool send(Vault& v, int pid, const void* sbuffer, size_t ssize, void* rbuffer, s
// NOTE(review): interior of send() from a rendered diff hunk (no +/- markers);
// the old paddr-based and the new vaddr-based IpcStruct setup are both shown.
Thread* current_thread = v.scheduler.active();
// NOTE(review): no visible bounds/liveness check on `pid` before indexing
// thread_list -- confirm callers validate it.
Thread* target_thread = v.thread_list[pid];
// Old revision: translate the sender buffer to a physical address up front.
uintptr_t sbuffer_ptr = isMapped((uintptr_t)sbuffer,v.scheduler.active()->paging_tree->l4) + ((uintptr_t)sbuffer&0xFFF);
IpcStruct msg {
.ptr = sbuffer_ptr,
.size = ssize,
.pid = current_thread->id,
.is_answer = false,
.queue_link = nullptr
};
//uintptr_t sbuffer_ptr = isMapped((uintptr_t)sbuffer,v.scheduler.active()->paging_tree->l4) + ((uintptr_t)sbuffer&0xFFF);
// New revision: store the sender's virtual address; receive() now resolves it
// page by page through the sender's paging tree (see the receive() hunk below).
IpcStruct msg = {
.ptr = (uintptr_t)sbuffer,
.size = ssize,
.pid = current_thread->id,
.is_answer = false,
.queue_link = nullptr
};
// Hand the message to the target thread and wake it via its IPC semaphore.
target_thread->ipc_queue.enqueue(msg);
target_thread->ipc_sem.v(v);
@@ -239,7 +240,7 @@ int receive(Vault& v, void* buffer, size_t size) {
// NOTE(review): interior/tail of receive() from a rendered diff hunk without
// +/- markers; removed and added lines are interleaved (the doubled
// ipc_sem.p(v) below is almost certainly one removed + one added line, not
// two real P operations -- confirm in the repo).
// Wait for a message
//if (thread->ipc_queue.is_empty()) {
thread->ipc_sem.p(v);
thread->ipc_sem.p(v);
//}
IpcStruct* ipc = thread->ipc_queue.first();
@@ -247,9 +248,13 @@ int receive(Vault& v, void* buffer, size_t size) {
// Copy at most the caller's buffer size.
size_t copy_len = (size < ipc->size) ? size:ipc->size;
// Fetch the buffer
// Old revision: one copy_from_phys call on the pre-translated physical ptr.
if (!copy_from_phys(v, ipc->ptr, buffer, copy_len)) {
return -3;
}
// New revision: ipc->ptr is a vaddr in the SENDER's address space, so each
// page is translated through the sender's paging tree before copying.
// NOTE(review): every iteration passes the full copy_len and the same `buffer`
// base, so page i's data lands at the start of buffer instead of at
// buffer + i*4096 -- multi-page messages look corrupted; also each call
// recopies copy_len bytes even though copy_from_phys already walks pages.
// Confirm whether intent was copy_from_phys(v, paddr, buffer + i*4096, <per-page len>).
for(uint32_t i=0; i<(copy_len/4096)+1; i++){
uintptr_t paddr = isMapped((ipc->ptr)+(i*4096), v.thread_list[ipc->pid]->paging_tree->l4);
if (!copy_from_phys(v, paddr, buffer, copy_len)) {
return -3;
}
}
// Success: return the sender's pid.
return ipc->pid;
}