#include "../syscall/skeleton.h"
#include "../debug/kernelpanic.h"
#include "../debug/output.h"
#include "../device/textstream.h"
#include "../interrupt/guard.h"
#include "../memory/page.h"
#include "../sync/semaphore.h"
#include "../thread/scheduler.h"
#include "../memory/pageframealloc.h"

// Placement-new declaration (freestanding kernel build, no <new> header).
void *operator new(size_t, void *);

//#include "../user/app1/appl.h"
//extern Application apps[];

// NOTE(review): unused in this translation unit -- candidate for removal,
// kept here in case another TU declares it extern.
uint8_t mapNumber = 0;

namespace Syscall {
namespace Skeleton {

/// Invalidate the TLB entry covering virt_addr on the current core.
void invlpg(uintptr_t virt_addr) {
	asm volatile("invlpg (%0)" : : "r" (virt_addr) : "memory");
}

/// Debug syscall: echo the five raw syscall parameters to the kernel console.
/// \return magic constant 0xdeadbeef so user code can verify the round trip.
size_t test(Vault &vault, size_t p1, size_t p2, size_t p3, size_t p4, size_t p5) {
	vault.kout << "test(" << p1 << ", " << p2 << ", " << p3 << ", " << p4
	           << ", " << p5 << ");" << endl;
	return 0xdeadbeef;
}

/// \return the id of the currently running thread.
int getpid(Vault &vault) {
	Thread *me = vault.scheduler.active();
	return me->id; // TODO: find better pid source
}

/// Write \p size bytes from \p buffer to stream \p id (1 = kout, 2 = dout,
/// anything else falls back to kout). x/y position the cursor; -1 keeps the
/// stream's current coordinate for that axis.
/// \return always 0.
size_t write(Vault &vault, uint32_t id, const void *buffer, size_t size, int x, int y) {
	TextStream *out;
	switch (id) {
		case 1:  out = &vault.kout; break;
		case 2:  out = &dout;       break;
		default: out = &vault.kout;
	}

	// Fill in any coordinate the caller left unspecified (-1) with the
	// stream's current cursor position.
	int dummy;
	if (x == -1 && y != -1) out->getPos(x, dummy);
	if (x != -1 && y == -1) out->getPos(dummy, y);
	if (x == -1 && y == -1) out->getPos(x, y);
	out->setPos(x, y);

	// NOTE(review): the original output loop was destroyed by source
	// mangling ("for(size_t i = 0; iflush();"). Reconstructed as a
	// character-wise print followed by the flush that survived -- confirm
	// against version history.
	for (size_t i = 0; i < size; ++i) {
		(*out) << ((const char *)buffer)[i];
	}
	out->flush();
	return 0;
}

/// Blocking read of up to \p len key presses into \p buf (one ASCII byte
/// per key). Stops early when an invalid key is consumed.
/// \return number of bytes actually stored.
size_t read(Vault &vault, uint32_t id, void *buf, size_t len) {
	(void)id; // only one input source exists so far
	size_t read_cnt = 0;
	while (read_cnt < len) {
		Key key;
		vault.keys_sem.p(vault);   // block until a key is buffered
		vault.keys.consume(key);
		if (!key.valid())
			break;                 // invalid key terminates the read
		((char *)buf)[read_cnt++] = key.ascii();
	}
	return read_cnt;
}

/// Put the calling thread to sleep for \p ms milliseconds.
void sleep(Vault &vault, size_t ms) {
	vault.bellringer.sleep(vault, ms);
}

/// Initialize semaphore slot \p id with counter \p value.
/// \return false if id is out of range or the slot is already in use.
bool sem_init(Vault &vault, size_t id, uint32_t value) {
	if (id >= vault.MAX_SEMS) {
		return false; // out-of-range id
	}
	if (vault.sems[id].used == 1) {
		return false; // already in use
	}
	vault.sems[id].counter = value;
	// BUGFIX: the slot was never marked as used, so sem_destroy() always
	// failed and a live semaphore could be re-initialized at any time.
	vault.sems[id].used = 1;
	return true;
}

/// Release semaphore slot \p id and reset its counter.
/// \return false if id is out of range or the slot is already free.
bool sem_destroy(Vault &vault, size_t id) {
	if (id >= vault.MAX_SEMS) {
		return false; // out-of-range id
	}
	if (vault.sems[id].used == 0) {
		return false; // already free
	}
	vault.sems[id].used = 0;
	vault.sems[id].counter = 0;
	return true;
}

/// V (signal) operation on semaphore \p id.
bool sem_signal(Vault &vault, size_t id) {
	// BUGFIX: reject invalid ids instead of indexing out of bounds.
	if (id >= vault.MAX_SEMS) {
		return false;
	}
	vault.sems[id].v(vault);
	return true;
}

/// P (wait) operation on semaphore \p id.
bool sem_wait(Vault &vault, size_t id) {
	// BUGFIX: reject invalid ids instead of indexing out of bounds.
	if (id >= vault.MAX_SEMS) {
		return false;
	}
	vault.sems[id].p(vault);
	return true;
}

/// Terminate the calling thread, tearing down its user mapping first.
void exit(Vault &vault) {
	// NOTE(review): 512 bytes rounds down to zero pages inside unmap(), so
	// only the memset of the first 512 bytes takes effect -- possibly 4096
	// was intended. Kept as-is to preserve behavior; confirm intent.
	unmap(vault, (void *)0x4000, 512);
	vault.scheduler.exit();
}

/// Kill thread \p pid. Not implemented yet.
void kill(Vault &vault, size_t pid) {
	(void)vault;
	(void)pid;
	//vault.scheduler.kill(&apps[pid]);
}

/// Walk the 4-level paging tree \p flpt for virtual address \p vaddr.
/// \return physical base address of the mapped 4 KiB frame (page offset NOT
///         added -- callers add it themselves), or 0 if any level of the
///         translation is not present. A frame legitimately located at
///         physical address 0 is indistinguishable from "unmapped".
uintptr_t isMapped(uintptr_t vaddr, four_lvl_paging_t *flpt) {
	uint16_t l4Index = (vaddr >> 39) & 0x1FF;
	uint16_t l3Index = (vaddr >> 30) & 0x1FF;
	uint16_t l2Index = (vaddr >> 21) & 0x1FF;
	uint16_t l1Index = (vaddr >> 12) & 0x1FF;
	if (flpt->l4->entries[l4Index].present) {
		pagetable_t *lvl3 = (pagetable_t *)(flpt->l4->entries[l4Index].address << 12);
		if (lvl3->entries[l3Index].present) {
			pagetable_t *lvl2 = (pagetable_t *)(lvl3->entries[l3Index].address << 12);
			if (lvl2->entries[l2Index].present) {
				pagetable_t *lvl1 = (pagetable_t *)(lvl2->entries[l2Index].address << 12);
				if (lvl1->entries[l1Index].present)
					return lvl1->entries[l1Index].address << 12;
			}
		}
	}
	return 0;
}

/// Search the fixed virtual window [0x4000, 0x6000) for \p num_pages
/// consecutive unmapped pages in \p search_table.
/// NOTE(review): the search loop of this function was destroyed by source
/// mangling; only the declarations survived. Reconstructed as a linear scan
/// resuming at the last hand-out position -- confirm against VCS history.
/// \return page-aligned start of the free run, or nullptr if none fits.
void *getFreeVirtSpace(four_lvl_paging_t *search_table, uint8_t num_pages) {
	const uint32_t start_v = 0x4000;
	const uint32_t stop_v  = 0x6000;
	static uint32_t next_start_v = start_v; // start from last found address
	for (uint32_t v = next_start_v;
	     v + (uint32_t)num_pages * 0x1000 <= stop_v; v += 0x1000) {
		bool all_free = true;
		for (uint8_t p = 0; p < num_pages; ++p) {
			if (isMapped(v + (uint32_t)p * 0x1000, search_table) != 0) {
				all_free = false;
				break;
			}
		}
		if (all_free) {
			next_start_v = v + (uint32_t)num_pages * 0x1000;
			return (void *)(uintptr_t)v;
		}
	}
	next_start_v = start_v; // window exhausted; restart the scan next call
	return nullptr;
}

/// Install (or clear, when frame == nullptr) the mapping vaddr -> frame in
/// the 4-level tree \p flpt, allocating missing intermediate tables on
/// demand (present/write/user).
/// NOTE(review): the function head was destroyed by source mangling; the
/// signature is reconstructed from the call sites in map()/unmap().
void setMapping(uintptr_t vaddr, void *frame, four_lvl_paging_t *flpt) {
	uint16_t l4Index = (vaddr >> 39) & 0x1FF;
	uint16_t l3Index = (vaddr >> 30) & 0x1FF;
	uint16_t l2Index = (vaddr >> 21) & 0x1FF;
	uint16_t l1Index = (vaddr >> 12) & 0x1FF;
	if (!(flpt->l4->entries[l4Index].present)) {
		pagetable_t *newl3 = (pagetable_t *)PageFrameAllocator::alloc(true);
		memset(newl3, 0, 4096);
		flpt->l4->entries[l4Index] = { .present = 1, .write = 1, .user = 1,
		                               .address = (uintptr_t)newl3 >> 12 };
	}
	pagetable_t *lvl3 = (pagetable_t *)(flpt->l4->entries[l4Index].address << 12);
	if (!(lvl3->entries[l3Index].present)) {
		pagetable_t *newl2 = (pagetable_t *)PageFrameAllocator::alloc(true);
		memset(newl2, 0, 4096);
		lvl3->entries[l3Index] = { .present = 1, .write = 1, .user = 1,
		                           .address = (uintptr_t)newl2 >> 12 };
	}
	pagetable_t *lvl2 = (pagetable_t *)(lvl3->entries[l3Index].address << 12);
	if (!(lvl2->entries[l2Index].present)) {
		pagetable_t *newl1 = (pagetable_t *)PageFrameAllocator::alloc(true);
		memset(newl1, 0, 4096);
		lvl2->entries[l2Index] = { .present = 1, .write = 1, .user = 1,
		                           .address = (uintptr_t)newl1 >> 12 };
	}
	pagetable_t *lvl1 = (pagetable_t *)(lvl2->entries[l2Index].address << 12);
	if (frame) {
		// A new mapping must not silently overwrite an existing one.
		assert(!(lvl1->entries[l1Index].present));
		lvl1->entries[l1Index] = { .present = 1, .write = 1, .user = 1,
		                           .address = (uintptr_t)frame >> 12 };
	} else {
		// frame == nullptr means: clear the mapping.
		lvl1->entries[l1Index].present = 0;
	}
}

/// Map \p size bytes of freshly allocated physical memory into the active
/// thread's address space.
/// \return start of the new virtual region, or nullptr if no contiguous
///         virtual window is available.
void *map(Vault *vault, size_t size) {
	size_t num_pages = (size + 4096 - 1) / 4096;
	four_lvl_paging_t *search_table = vault->scheduler.active()->paging_tree;
	void *ptr = getFreeVirtSpace(search_table, num_pages);
	if (ptr == nullptr) {
		return nullptr;
	}
	for (size_t i = 0; i < num_pages; ++i) {
		void *frame = PageFrameAllocator::alloc(false);
		// BUGFIX: every frame was mapped at the same virtual address
		// ((uintptr_t)ptr without the per-page offset), so only the first
		// page was usable and setMapping's presence assert would fire on
		// the second iteration.
		setMapping((uintptr_t)ptr + i * 4096, frame, search_table);
	}
	return ptr;
}

/// Scrub and unmap \p size bytes starting at \p start in the active
/// thread's address space, returning the backing frames to the allocator.
/// \return always 0.
int unmap(Vault &vault, void *start, size_t size) {
	// NOTE(review): rounds down -- a trailing partial page is zeroed by the
	// memset below but stays mapped. Preserved original behavior.
	uint32_t NumberOfPages = size / 4096;
	four_lvl_paging_t *search_table = vault.scheduler.active()->paging_tree;
	uint32_t startIndex = ((uintptr_t)start) >> 12;
	memset(start, 0, size); // scrub while the region is still mapped
	for (uint32_t i = startIndex; i < (startIndex + NumberOfPages); i++) {
		uintptr_t frame = isMapped((uintptr_t)i << 12, search_table);
		setMapping((uintptr_t)i << 12, 0, search_table);
		// BUGFIX: don't hand frame 0 (= "was not mapped") to the allocator.
		if (frame != 0) {
			PageFrameAllocator::free(frame);
		}
		invlpg((uintptr_t)i << 12);
	}
	return 0;
}

/// Copy \p size bytes from physical address \p phys_ptr into \p virt_ptr of
/// the current address space by temporarily mapping the source frames into
/// a scratch virtual window.
/// \return false if no scratch window was available.
bool copy_from_phys(Vault &vault, uintptr_t phys_ptr, void *virt_ptr, size_t size) {
	Thread *current_thread = vault.scheduler.active();
	size_t offset = Page::offset(phys_ptr);
	size_t total_size = size + offset;
	four_lvl_paging_t *search_table = current_thread->paging_tree;
	size_t num_pages = (total_size / 4096) + 1;
	void *virt_addr = getFreeVirtSpace(search_table, num_pages); // page aligned
	if (virt_addr == nullptr) {
		return false;
	}

	// NOTE(review): this mapping loop was destroyed by source mangling;
	// reconstructed: map each page-aligned source frame into the scratch
	// window -- confirm against VCS history.
	uintptr_t phys_base = phys_ptr - offset;
	for (size_t i = 0; i < num_pages; ++i) {
		setMapping((uintptr_t)virt_addr + i * 4096,
		           (void *)(phys_base + i * 4096),
		           current_thread->paging_tree);
	}

	memcpy(virt_ptr, (void *)((uintptr_t)virt_addr + (uintptr_t)offset), size);

	// BUGFIX: the original called PageFrameAllocator::free() on the
	// *virtual* window address, corrupting the frame allocator, and never
	// removed the temporary mapping. Tear the window down instead; the
	// physical frames still belong to their owner and must not be freed.
	for (size_t i = 0; i < num_pages; ++i) {
		setMapping((uintptr_t)virt_addr + i * 4096, nullptr,
		           current_thread->paging_tree);
		invlpg((uintptr_t)virt_addr + i * 4096);
	}
	return true;
}

/// Synchronous IPC send: deliver sbuffer/ssize to thread \p pid, then block
/// until reply() answers, copying the answer into rbuffer/rsize.
/// \return false on invalid target, unmapped send buffer, or copy failure.
bool send(Vault &v, int pid, const void *sbuffer, size_t ssize, void *rbuffer, size_t rsize) {
	Thread *current_thread = v.scheduler.active();
	Thread *target_thread = v.thread_list[pid];
	// BUGFIX: guard against a dead/invalid target before dereferencing.
	if (target_thread == nullptr) {
		return false;
	}
	uintptr_t sbuffer_phys = isMapped((uintptr_t)sbuffer, current_thread->paging_tree);
	// BUGFIX: previously an unmapped send buffer produced a bogus physical
	// pointer (0 + page offset) that the receiver would then copy from.
	if (sbuffer_phys == 0) {
		return false;
	}
	uintptr_t sbuffer_ptr = sbuffer_phys + ((uintptr_t)sbuffer & 0xFFF);
	// msg lives on this stack frame; that is safe because we block below
	// until the receiver's reply() has marked it answered.
	IpcStruct msg {
		.ptr = sbuffer_ptr,
		.size = ssize,
		.pid = current_thread->id,
		.is_answer = false,
		.queue_link = nullptr
	};
	target_thread->ipc_queue.enqueue(msg);
	target_thread->ipc_sem.v(v);
	// Block until reply() fills in the answer.
	while (true) {
		current_thread->ipc_sem.p(v);
		if (msg.is_answer) {
			break;
		}
	}
	// Copy the answer into the caller's receive buffer.
	if (!copy_from_phys(v, msg.ptr, rbuffer, rsize)) {
		return false;
	}
	return true;
}

/// Synchronous IPC receive: block until a message arrives, copy up to
/// \p size bytes of it into \p buffer. The message stays queued so that
/// reply() can dequeue and answer it.
/// \return sender pid, -1 if no message was queued, -3 on copy failure.
int receive(Vault &v, void *buffer, size_t size) {
	Thread *thread = v.scheduler.active();
	// Wait for a message.
	if (thread->ipc_queue.is_empty()) {
		thread->ipc_sem.p(v);
	}
	IpcStruct *ipc = thread->ipc_queue.first(); // peek only; reply() dequeues
	if (ipc == nullptr)
		return -1;
	// Fetch the payload.
	if (!copy_from_phys(v, ipc->ptr, buffer, size)) {
		return -3;
	}
	return ipc->pid;
}

/// Answer the oldest pending IPC message with buffer/size and wake the
/// blocked sender.
/// \return false if there is no pending message or the sender vanished.
bool reply(Vault &v, const void *buffer, size_t size) {
	Thread *current_thread = v.scheduler.active();
	IpcStruct *ipc = current_thread->ipc_queue.dequeue();
	// BUGFIX: the validity check was commented out, so reply() without a
	// pending message dereferenced a null pointer.
	if (ipc == nullptr)
		return false;
	Thread *other_thread = v.thread_list[ipc->pid];
	if (other_thread == nullptr)
		return false;
	uintptr_t phys_ptr = isMapped((uintptr_t)buffer, current_thread->paging_tree);
	ipc->ptr = phys_ptr + ((uintptr_t)buffer & 0xFFF);
	ipc->size = size;
	ipc->pid = current_thread->id;
	ipc->is_answer = true;
	// Wake the sender blocked in send().
	other_thread->ipc_sem.v(v);
	return true;
}

} // namespace Skeleton
} // namespace Syscall