#include "../syscall/skeleton.h"
#include "../debug/kernelpanic.h"
#include "../debug/output.h"
#include "../device/textstream.h"
#include "../interrupt/guard.h"
#include "../sync/semaphore.h"
#include "../thread/scheduler.h"
#include "../memory/pageframealloc.h"

// Placement-new declaration (freestanding kernel: no <new> header available).
void *operator new(size_t, void *);

//#include "../user/app1/appl.h"
//extern Application apps[];

uint8_t mapNumber = 0;

namespace Syscall {
namespace Skeleton {

// Invalidate the TLB entry covering one virtual address (x86 INVLPG).
void invlpg(uintptr_t virt_addr) {
	asm volatile("invlpg (%0)" : : "r"(virt_addr) : "memory");
}

// Debug syscall: echo all five raw parameters to the kernel console and
// return a recognizable magic value so the syscall path can be verified.
size_t test(Vault &vault, size_t p1, size_t p2, size_t p3, size_t p4, size_t p5) {
	vault.kout << "test(" << p1 << ", " << p2 << ", " << p3 << ", "
	           << p4 << ", " << p5 << ");" << endl;
	return 0xdeadbeef;
}

// Return the calling thread's id as its pid.
int getpid(Vault &vault) {
	Thread *me = vault.scheduler.active();
	//unsigned id = 0;
	//while (&apps[id++] != me); // TODO find better pid source
	//return id;
	return me->id;
}

// Write `size` bytes from `buffer` to the stream selected by `id`
// (1 = kernel console, 2 = debug output, anything else = kernel console)
// at screen position (x, y). A coordinate of -1 means "keep the current
// cursor position for that axis". Always returns 0.
size_t write(Vault &vault, uint32_t id, const void *buffer, size_t size, int x, int y) {
	TextStream *out;
	switch (id) {
		case 1:  out = &vault.kout; break;
		case 2:  out = &dout;       break;
		default: out = &vault.kout;
	}
	int dummy;
	if (x == -1 && y != -1) out->getPos(x, dummy);
	if (x != -1 && y == -1) out->getPos(dummy, y);
	if (x == -1 && y == -1) out->getPos(x, y);
	out->setPos(x, y);
	// NOTE(review): the original loop body was destroyed in the source dump
	// ("for(size_t i = 0; iflush();"); reconstructed as a plain
	// character-by-character print followed by a flush -- confirm against
	// version control.
	for (size_t i = 0; i < size; i++)
		*out << ((const char *)buffer)[i];
	out->flush();
	return 0;
}

// Blocking read of up to `len` keyboard characters into `buf`; stops
// early when an invalid key is consumed. Returns the byte count stored.
size_t read(Vault &vault, uint32_t id, void *buf, size_t len) {
	(void)id; // only one input source exists so far
	size_t read_cnt = 0;
	while (read_cnt < len) {
		Key key;
		vault.keys_sem.p(vault); // block until a key is buffered
		vault.keys.consume(key);
		if (key.valid())
			((char *)buf)[read_cnt++] = key.ascii();
		else
			break;
	}
	return read_cnt;
}

// Put the calling thread to sleep for `ms` milliseconds.
void sleep(Vault &vault, size_t ms) {
	vault.bellringer.sleep(vault, ms);
}

// Claim semaphore slot `id` and initialize its counter to `value`.
// Returns false for an out-of-range or already-used slot.
bool sem_init(Vault &vault, size_t id, uint32_t value) {
	if (id >= vault.MAX_SEMS)
		return false; // out-of-range id
	if (vault.sems[id].used == 1)
		return false; // already in use
	vault.sems[id].used = 1; // FIX: original never marked the slot as used
	vault.sems[id].counter = value;
	return true;
}

// Release semaphore slot `id`. Returns false for an out-of-range or
// already-free slot.
bool sem_destroy(Vault &vault, size_t id) {
	if (id >= vault.MAX_SEMS)
		return false; // out-of-range id
	if (vault.sems[id].used == 0)
		return false; // already free
	vault.sems[id].used = 0;
	vault.sems[id].counter = 0;
	return true;
}

// V (signal) on semaphore `id`.
bool sem_signal(Vault &vault, size_t id) {
	// FIX: validate id like sem_init/sem_destroy instead of indexing blindly.
	if (id >= vault.MAX_SEMS || vault.sems[id].used == 0)
		return false;
	vault.sems[id].v(vault);
	return true;
}

// P (wait) on semaphore `id`.
bool sem_wait(Vault &vault, size_t id) {
	// FIX: validate id like sem_init/sem_destroy instead of indexing blindly.
	if (id >= vault.MAX_SEMS || vault.sems[id].used == 0)
		return false;
	vault.sems[id].p(vault);
	return true;
}

// Terminate the calling thread, releasing its user mapping first.
void exit(Vault &vault) {
	// TODO(review): 512 bytes is less than one page, so unmap() computes
	// zero pages here -- was 512 *pages* (or 0x2000 bytes for the
	// 0x4000..0x6000 window) intended? Confirm before changing.
	unmap(vault, (void *)0x4000, 512);
	vault.scheduler.exit();
}

// Kill the thread identified by `pid`. Not implemented yet: there is no
// pid -> thread mapping available.
void kill(Vault &vault, size_t pid) {
	(void)vault;
	(void)pid;
	//vault.scheduler.kill(&apps[pid]);
}

// Walk the 4-level page table rooted at `flpt`; return the physical
// frame address mapped at `vaddr`, or 0 if any level is not present.
uintptr_t isMapped(uintptr_t vaddr, four_lvl_paging_t *flpt) {
	uint16_t l4Index = (vaddr >> 39) & 0x1FF;
	uint16_t l3Index = (vaddr >> 30) & 0x1FF;
	uint16_t l2Index = (vaddr >> 21) & 0x1FF;
	uint16_t l1Index = (vaddr >> 12) & 0x1FF;
	if (flpt->l4->entries[l4Index].present) {
		pagetable_t *lvl3 = (pagetable_t *)(flpt->l4->entries[l4Index].address << 12);
		if (lvl3->entries[l3Index].present) {
			pagetable_t *lvl2 = (pagetable_t *)(lvl3->entries[l3Index].address << 12);
			if (lvl2->entries[l2Index].present) {
				pagetable_t *lvl1 = (pagetable_t *)(lvl2->entries[l2Index].address << 12);
				if (lvl1->entries[l1Index].present)
					return lvl1->entries[l1Index].address << 12;
			}
		}
	}
	return 0;
}

// Find `num_pages` consecutive unmapped pages in the fixed user window
// [0x4000, 0x6000). Returns the start address, or nullptr if no run fits.
void *getFreeVirtSpace(four_lvl_paging_t *search_table, uint8_t num_pages) {
	const uint32_t start_v = 0x4000;
	const uint32_t stop_v = 0x6000;
	static uint32_t next_start_v = start_v; // resume behind the last hit
	// NOTE(review): the original scan loop was destroyed in the source dump;
	// reconstructed as a first-fit search that starts at next_start_v and
	// wraps around once -- confirm against version control.
	for (int pass = 0; pass < 2; pass++) {
		uint32_t begin = (pass == 0) ? next_start_v : start_v;
		for (uint32_t v = begin; v + (uint32_t)num_pages * 4096 <= stop_v; v += 4096) {
			bool all_free = true;
			for (uint8_t p = 0; p < num_pages; p++) {
				if (isMapped(v + (uint32_t)p * 4096, search_table) != 0) {
					all_free = false;
					break;
				}
			}
			if (all_free) {
				next_start_v = v + (uint32_t)num_pages * 4096;
				return (void *)(uintptr_t)v;
			}
		}
	}
	return nullptr;
}

// Map (or unmap when frame == nullptr) one 4 KiB page at `vaddr` in the
// paging tree `flpt`, allocating intermediate tables on demand with
// present/write/user set.
// NOTE(review): the signature was lost in the source dump; reconstructed
// from the call sites in map()/unmap() -- confirm against version control.
void setMapping(uintptr_t vaddr, void *frame, four_lvl_paging_t *flpt) {
	uint16_t l4Index = (vaddr >> 39) & 0x1FF;
	uint16_t l3Index = (vaddr >> 30) & 0x1FF;
	uint16_t l2Index = (vaddr >> 21) & 0x1FF;
	uint16_t l1Index = (vaddr >> 12) & 0x1FF;
	if (!(flpt->l4->entries[l4Index].present)) {
		pagetable_t *newl3 = (pagetable_t *)PageFrameAllocator::alloc(true);
		memset(newl3, 0, 4096);
		flpt->l4->entries[l4Index] = { .present = 1, .write = 1, .user = 1,
		                               .address = (uintptr_t)newl3 >> 12 };
	}
	pagetable_t *lvl3 = (pagetable_t *)(flpt->l4->entries[l4Index].address << 12);
	if (!(lvl3->entries[l3Index].present)) {
		pagetable_t *newl2 = (pagetable_t *)PageFrameAllocator::alloc(true);
		memset(newl2, 0, 4096);
		lvl3->entries[l3Index] = { .present = 1, .write = 1, .user = 1,
		                           .address = (uintptr_t)newl2 >> 12 };
	}
	pagetable_t *lvl2 = (pagetable_t *)(lvl3->entries[l3Index].address << 12);
	if (!(lvl2->entries[l2Index].present)) {
		pagetable_t *newl1 = (pagetable_t *)PageFrameAllocator::alloc(true);
		memset(newl1, 0, 4096);
		lvl2->entries[l2Index] = { .present = 1, .write = 1, .user = 1,
		                           .address = (uintptr_t)newl1 >> 12 };
	}
	pagetable_t *lvl1 = (pagetable_t *)(lvl2->entries[l2Index].address << 12);
	if (frame) {
		// A fresh mapping must not overwrite an existing one.
		assert(!(lvl1->entries[l1Index].present));
		lvl1->entries[l1Index] = { .present = 1, .write = 1, .user = 1,
		                           .address = (uintptr_t)frame >> 12 };
	} else {
		// nullptr frame means unmap.
		lvl1->entries[l1Index].present = 0;
	}
}

// Map `size` bytes (rounded up to whole pages) of freshly allocated
// frames into the active thread's address space. Returns the virtual
// start address, or nullptr if no free virtual window was found.
void *map(Vault *vault, size_t size) {
	size_t num_pages = (size + 4096 - 1) / 4096; // round up to whole pages
	//pagetable_t* subbytable = vault->scheduler.active()->subtable;
	four_lvl_paging_t *search_table = vault->scheduler.active()->paging_tree;
	void *ptr = getFreeVirtSpace(search_table, num_pages);
	if (ptr == nullptr) {
		return nullptr;
	}
	for (size_t i = 0; i < num_pages; ++i) {
		void *frame = PageFrameAllocator::alloc(false);
		// FIX: original mapped every frame at the same address `ptr`;
		// each page must be mapped at its own offset.
		setMapping((uintptr_t)ptr + i * 4096, frame, search_table);
	}
	return ptr;
}

// Unmap `size / 4096` pages starting at `start` in the active thread's
// address space, scrubbing the memory and releasing the backing frames.
// Always returns 0.
int unmap(Vault &vault, void *start, size_t size) {
	uint32_t NumberOfPages = (size / 4096);
	four_lvl_paging_t *search_table = vault.scheduler.active()->paging_tree;
	uint32_t startIndex = ((uintptr_t)start) >> 12;
	memset(start, 0, size); // scrub contents before releasing the frames
	for (uint32_t i = startIndex; i < (startIndex + NumberOfPages); i++) {
		uintptr_t vaddr = (uintptr_t)i << 12;
		uintptr_t frame = isMapped(vaddr, search_table);
		setMapping(vaddr, 0, search_table);
		if (frame) // FIX: do not free() a frame that was never mapped
			PageFrameAllocator::free(frame);
		invlpg(vaddr); // FIX: original flushed the page *index* `i`, not the address
	}
	return 0;
}

} // namespace Skeleton
} // namespace Syscall