create copies of app code in thread constructor

This commit is contained in:
2026-02-28 18:01:15 +01:00
parent 1acc652446
commit 4976ea91be
9 changed files with 265 additions and 180 deletions

View File

@@ -51,39 +51,39 @@ static void printContext(const InterruptContext *context) {
#include "syscall/skeleton.h"
[[gnu::interrupt]] static void handle_page_fault(InterruptContext *context,
uint64_t error) {
DBG << "Page fault encountered at linear address " << hex
<< Core::CR<2>::read() << endl
<< PageFaultError(error) << endl;
four_lvl_paging_t* paging_tree = Guard::enter().vault().scheduler.active()->paging_tree ;
asm volatile("mov %%cr3, %0" : "=r"(paging_tree));
uint64_t error) {
DBG << "Page fault encountered at linear address " << hex
<< Core::CR<2>::read() << endl
<< PageFaultError(error) << endl;
//four_lvl_paging_t* paging_tree = Guard::enter().vault().scheduler.active()->paging_tree ;
//asm volatile("mov %%cr3, %0" : "=r"(paging_tree));
uintptr_t virt = 0;
asm volatile("mov %%cr2, %0" : "=r"(virt));
if (PageFaultError(error).present && PageFaultError(error).write) {
uintptr_t pf =isMapped(virt, paging_tree);
uint64_t pf_number = uint64_t (pf >> 12 );
//uintptr_t virt = 0;
//asm volatile("mov %%cr2, %0" : "=r"(virt));
//if (PageFaultError(error).present && PageFaultError(error).write) {
// uintptr_t pf =isMapped(virt, paging_tree);
// uint64_t pf_number = uint64_t (pf >> 12 );
if(PageFrameAllocator::PageFrames[pf_number].ref_count==1){
setMapping(virt, (void*) pf , paging_tree, true);
// if(PageFrameAllocator::PageFrames[pf_number].ref_count==1){
// setMapping(virt, (void*) pf , paging_tree, true);
}else{
// }else{
uintptr_t page = (uintptr_t) PageFrameAllocator::alloc(false);
//Syscall::Skeleton::map(page, 4096);
setMapping(virt, (void*) page , paging_tree, true);
// uintptr_t page = (uintptr_t) PageFrameAllocator::alloc(false);
// //Syscall::Skeleton::map(page, 4096);
// setMapping(virt, (void*) page , paging_tree, true);
memcpy( (void*)page, (void*)virt, 4096);
Syscall::Skeleton::invlpg(virt);
}
// memcpy( (void*)page, (void*)virt, 4096);
// Syscall::Skeleton::invlpg(virt);
// }
}
//}
//Syscall::Skeleton::exit(Guard::enter().vault());
//kernelpanic("Page fault!");
Syscall::Skeleton::exit(Guard::enter().vault());
//kernelpanic("Page fault!");
}
/*! \brief Assembly interrupt handler for the keyboard.

View File

@@ -56,8 +56,8 @@ extern "C" int main() {
PageFrameAllocator::stats();
Multiboot::Module* initrd = Multiboot::getModule(0);
DBG << "initrd address: " << hex << initrd->getStartAddress() << endl << "initrd size: " << initrd->getSize() << endl;
memcpy((void *)0x4000000, initrd->getStartAddress(), initrd->getSize());
mark_pageframes(0x4000000, 0x4000000 + initrd->getSize(), false);
//memcpy((void *)0x4000000, initrd->getStartAddress(), initrd->getSize());
//mark_pageframes(0x4000000, 0x4000000 + initrd->getSize(), false);
write_identity_map(identity_table, KERNEL_MEMORY_BORDER);
//create_basic_page_table(&paging_tree, identity_table);
@@ -88,8 +88,8 @@ extern "C" int main() {
uint32_t (*apps_header)[1024] = (uint32_t (*)[1024])Multiboot::getModule(0)->getStartAddress();
uint64_t offset = 1;
for(uint16_t i = 1; i <= (*apps_header)[0]; i++){
uint32_t appsize = (*apps_header)[i];
uintptr_t appstart = (0x4000+offset)<<12;
uint64_t appsize = (*apps_header)[i];
uintptr_t appstart = (uintptr_t)apps_header+(offset<<12);
DBG << "app " << i << " size " << appsize << " at " << appstart << endl;
Thread* thread_ptr= new Thread(false, (void*)0x4000000, (void*)appstart, (appsize/4096)+1); //TODO fix edgecase on size=4096

View File

@@ -3,10 +3,22 @@
#include "pageframealloc.h"
#include "../utils/string.h"
/// \brief Invalidate the TLB entry for one virtual page on the current CPU.
/// \param virt_addr virtual address inside the page whose translation is flushed.
/// "memory" clobber keeps the compiler from reordering memory accesses around the flush.
void invlpg(uintptr_t virt_addr) {
asm volatile("invlpg (%0)" : : "r" (virt_addr) : "memory");
}
/// \brief Switch address spaces by writing a new top-level page-table address into CR3.
/// \param cr3_value physical address of the level-4 page table (must be page aligned).
/// Writing CR3 also flushes non-global TLB entries as a side effect.
void load_cr3( void* cr3_value ) {
asm volatile("mov %0, %%cr3" :: "r"((uint64_t)cr3_value) : "memory");
}
/// \brief Read CR3, i.e. the physical address of the currently active
///        level-4 page table (plus flag bits in the low 12 bits, if any are set).
/// \return raw CR3 value of the executing CPU.
uintptr_t get_cr3() {
    uintptr_t cr3;
    // use the same `asm volatile` spelling as invlpg()/load_cr3() for consistency
    asm volatile("mov %%cr3, %0" : "=r"(cr3));
    return cr3;
}
void write_identity_map(pagetable_t* identity_table, uint64_t size){
for(uintptr_t i=0; i<size/4096; i++){
identity_table[i/512].entries[i%512] = {
@@ -18,6 +30,41 @@ void write_identity_map(pagetable_t* identity_table, uint64_t size){
}
}
/// \brief Copy one 4 KiB frame into a (possibly other) address space.
///
/// Ensures \p dest_vaddr is backed by a frame in address space \p l4
/// (allocating one if needed), then temporarily maps both the destination
/// frame and the source frame into the *current* address space, copies the
/// page, and removes the temporary mappings again.
///
/// \param l4         target level-4 page table that should contain dest_vaddr.
/// \param src_paddr  physical address of the source frame.
/// \param dest_vaddr virtual address (in \p l4) the copy should appear at.
/// \return true on success, false if a frame or temporary virtual slot
///         could not be obtained.
bool copy_page(pagetable_t* l4, uintptr_t src_paddr, void* dest_vaddr) {
    // CR3 does not change inside this function; read it once.
    pagetable_t* current_l4 = (pagetable_t*)get_cr3();
    // Allocate and map a destination frame if dest_vaddr is not mapped yet.
    uintptr_t dest_paddr = isMapped((uintptr_t)dest_vaddr, l4);
    if(dest_paddr == 0){
        dest_paddr = (uintptr_t)PageFrameAllocator::alloc(false);
        if(dest_paddr == 0)
            return false;
        setMapping((uintptr_t)dest_vaddr, (void*)dest_paddr, l4);
    }
    // Temporary window onto the destination frame in the current address space.
    void* temp_dest_vaddr = getFreeVirtSpace(current_l4, 1);
    if(temp_dest_vaddr == nullptr)
        return false;
    setMapping((uintptr_t)temp_dest_vaddr, (void*)dest_paddr, current_l4);
    // Temporary window onto the source frame.
    void* temp_src_vaddr = getFreeVirtSpace(current_l4, 1);
    if(temp_src_vaddr == nullptr){
        // don't leak the temporary destination mapping on the error path
        setMapping((uintptr_t)temp_dest_vaddr, 0, current_l4);
        return false;
    }
    setMapping((uintptr_t)temp_src_vaddr, (void*)src_paddr, current_l4);
    // Copy the whole page through the two temporary windows.
    memcpy(temp_dest_vaddr, temp_src_vaddr, 4096);
    // Tear down both temporary mappings (setMapping invalidates the TLB
    // entries when unmapping in the active address space).
    setMapping((uintptr_t)temp_src_vaddr, 0, current_l4);
    setMapping((uintptr_t)temp_dest_vaddr, 0, current_l4);
    return true;
}
void create_basic_page_table(four_lvl_paging_t* table, pagetable_t* kernel_identity){
assert(table);
@@ -104,17 +151,19 @@ void create_basic_page_table(four_lvl_paging_t* table, pagetable_t* kernel_ident
kernel_identity[0].entries[0].present = 0;
}
uintptr_t isMapped(uintptr_t vaddr, four_lvl_paging_t* flpt){
uintptr_t isMapped(uintptr_t vaddr, pagetable_t* l4){
uint16_t l4Index = (vaddr>>39) & 0x1FF;
uint16_t l3Index = (vaddr>>30) & 0x1FF;
uint16_t l2Index = (vaddr>>21) & 0x1FF;
uint16_t l1Index = (vaddr>>12) & 0x1FF;
if(flpt->l4->entries[l4Index].present){
pagetable_t* lvl3 = (pagetable_t*)(flpt->l4->entries[l4Index].address<<12);
if(l4->entries[l4Index].present){
pagetable_t* lvl3 = (pagetable_t*)(l4->entries[l4Index].address<<12);
if(lvl3->entries[l3Index].present){
pagetable_t* lvl2 = (pagetable_t*)(lvl3->entries[l3Index].address<<12);
if(lvl2->entries[l2Index].present){
if(lvl2->entries[l2Index].pat)
return (lvl2->entries[l2Index].address<<21)+(l1Index<<12);
pagetable_t* lvl1 = (pagetable_t*)(lvl2->entries[l2Index].address<<12);
if(lvl1->entries[l1Index].present)
return lvl1->entries[l1Index].address<<12;
@@ -124,17 +173,17 @@ uintptr_t isMapped(uintptr_t vaddr, four_lvl_paging_t* flpt){
return 0;
}
void* getFreeVirtSpace(four_lvl_paging_t* search_table, uint8_t num_pages){
uint32_t start_v = 0x4000;
uint32_t stop_v = 0x6000;
static uint32_t next_start_v = 0x4000;
//static uint32_t next_start_v = start_v;
void* getFreeVirtSpace(pagetable_t* l4, uint8_t num_pages){
uint64_t start_v = 0x4000;
uint64_t stop_v = 0x200000;
static uint64_t next_start_v = 0x4000;
//static uint64_t next_start_v = start_v;
//start from last found address
for (uint32_t v=next_start_v; v<stop_v; v++) {
for (uint64_t v=next_start_v; v<stop_v; v++) {
bool space_is_free = true;
for(uint32_t i=0; i<num_pages; i++){
if(isMapped((uintptr_t)(v+i)<<12, search_table)) {
if(isMapped((uintptr_t)(v+i)<<12, l4)) {
space_is_free = false;
}
}
@@ -144,10 +193,10 @@ void* getFreeVirtSpace(four_lvl_paging_t* search_table, uint8_t num_pages){
}
}
//start again from the real start address
for (uint32_t v=start_v; v<next_start_v; v++) {
for (uint64_t v=start_v; v<next_start_v; v++) {
bool space_is_free = true;
for(uint32_t i=0; i<num_pages; i++){
if(isMapped((uintptr_t)(v+i)<<12, search_table)) {
if(isMapped((uintptr_t)(v+i)<<12, l4)) {
space_is_free = false;
}
}
@@ -160,16 +209,16 @@ void* getFreeVirtSpace(four_lvl_paging_t* search_table, uint8_t num_pages){
}
void setMapping(uintptr_t vaddr, void* frame, four_lvl_paging_t* flpt, bool write){
void setMapping(uintptr_t vaddr, void* frame, pagetable_t* l4, bool write, bool remap){
uint16_t l4Index = (vaddr>>39) & 0x1FF;
uint16_t l3Index = (vaddr>>30) & 0x1FF;
uint16_t l2Index = (vaddr>>21) & 0x1FF;
uint16_t l1Index = (vaddr>>12) & 0x1FF;
if(!(flpt->l4->entries[l4Index].present)){
if(!(l4->entries[l4Index].present)){
pagetable_t* newl3 = (pagetable_t*)PageFrameAllocator::alloc(true);
memset(newl3, 0, 4096);
flpt->l4->entries[l4Index] = {
l4->entries[l4Index] = {
.present = 1,
.write = 1,
.user = 1,
@@ -177,7 +226,7 @@ void setMapping(uintptr_t vaddr, void* frame, four_lvl_paging_t* flpt, bool writ
};
}
pagetable_t* lvl3 = (pagetable_t*)(flpt->l4->entries[l4Index].address<<12);
pagetable_t* lvl3 = (pagetable_t*)(l4->entries[l4Index].address<<12);
if(!(lvl3->entries[l3Index].present)){
pagetable_t* newl2 = (pagetable_t*)PageFrameAllocator::alloc(true);
memset(newl2, 0, 4096);
@@ -204,7 +253,8 @@ void setMapping(uintptr_t vaddr, void* frame, four_lvl_paging_t* flpt, bool writ
pagetable_t* lvl1 = (pagetable_t*)(lvl2->entries[l2Index].address<<12);
if(frame){
assert(!(lvl1->entries[l1Index].present)); // should not be present, bc its a new mapping
if(!remap)
assert(!(lvl1->entries[l1Index].present)); // should not be present, bc its a new mapping
lvl1->entries[l1Index] = {
.present = 1,
.write = write,
@@ -212,8 +262,11 @@ void setMapping(uintptr_t vaddr, void* frame, four_lvl_paging_t* flpt, bool writ
.address = (uintptr_t)frame >> 12
};
}
else //unmap if nullptr
else{ //unmap if nullptr
lvl1->entries[l1Index].present = 0;
if((void*)l4 == (void*)get_cr3())
invlpg(vaddr);
}
}
void copy_pagetable(four_lvl_paging_t* parent_table, four_lvl_paging_t* child_table){
@@ -233,17 +286,17 @@ void copy_pagetable(four_lvl_paging_t* parent_table, four_lvl_paging_t* child_ta
((uintptr_t)i1<<12) ;
if(vaddr < 0x4000000)
continue;
if(!isMapped(vaddr, child_table)){
if(!isMapped(vaddr, child_table->l4)){
//only copy if not part of basic pagetable
assert( (i3>0) || (i2>=32)); //assert user memory
void* frame = PageFrameAllocator::alloc(false);
setMapping(vaddr, frame, child_table);
setMapping(vaddr, frame, child_table->l4);
//map to local space for copy
void* dest = getFreeVirtSpace(parent_table, 1);
setMapping((uintptr_t)dest, frame, parent_table);
void* dest = getFreeVirtSpace(parent_table->l4, 1);
setMapping((uintptr_t)dest, frame, parent_table->l4);
memcpy(dest, (void*)vaddr, 4096);
setMapping((uintptr_t)dest, 0, parent_table);
setMapping((uintptr_t)dest, 0, parent_table->l4);
}
}
}

View File

@@ -50,10 +50,11 @@ typedef struct {
void write_identity_map(pagetable_t* identity_table, uint64_t size);
void create_basic_page_table(four_lvl_paging_t* table, pagetable_t* kernel_identity);
void load_cr3(void* cr3_value);
void setMapping(uintptr_t vaddr, void* frame, four_lvl_paging_t* flpt, bool write=true);
uintptr_t isMapped(uintptr_t vaddr, four_lvl_paging_t* flpt);
void* getFreeVirtSpace(four_lvl_paging_t* search_table, uint8_t num_pages);
void setMapping(uintptr_t vaddr, void* frame, pagetable_t* l4, bool write=true, bool remap=false);
uintptr_t isMapped(uintptr_t vaddr, pagetable_t* l4);
void* getFreeVirtSpace(pagetable_t* l4, uint8_t num_pages);
void copy_pagetable(four_lvl_paging_t* parent_table, four_lvl_paging_t* child_table);
bool copy_page(pagetable_t* l4, uintptr_t src_paddr, void* dest_vaddr);
//typedef struct {
// same

View File

@@ -145,7 +145,7 @@ namespace Syscall {
//pagetable_t* subbytable = vault->scheduler.active()->subtable;
four_lvl_paging_t* search_table = vault->scheduler.active()->paging_tree;
void* ptr = getFreeVirtSpace(search_table, num_pages);
void* ptr = getFreeVirtSpace(search_table->l4, num_pages);
if (ptr == nullptr) {
return nullptr;
}
@@ -154,7 +154,7 @@ namespace Syscall {
for (size_t i = 0; i < num_pages; ++i) {
// allocate each page with allocator
void* frame = PageFrameAllocator::alloc(false);
setMapping((uintptr_t)ptr, frame, search_table);
setMapping((uintptr_t)ptr, frame, search_table->l4);
}
return ptr;
}
@@ -166,8 +166,8 @@ namespace Syscall {
memset(start, 0, size);
for(uint32_t i=startIndex; i<(startIndex+NumberOfPages); i++){
uintptr_t frame = isMapped(i<<12, search_table);
setMapping(i<<12, 0, search_table);
uintptr_t frame = isMapped(i<<12, search_table->l4);
setMapping(i<<12, 0, search_table->l4);
PageFrameAllocator::free(frame);
invlpg(i<<12);
}
@@ -180,17 +180,18 @@ bool copy_from_phys(Vault& vault, uintptr_t phys_ptr, void* virt_ptr, size_t siz
size_t total_size = size + offset;
four_lvl_paging_t* search_table = vault.scheduler.active()->paging_tree;
void* virt_addr = getFreeVirtSpace(search_table, (total_size/4096)+1); // page aligned pointer
void* virt_addr = getFreeVirtSpace(search_table->l4, (total_size/4096)+1); // page aligned pointer
if (virt_addr == nullptr) {
return false;
}
for(uint8_t i =0; i<size/4096 +1; i++){
setMapping((uintptr_t)virt_addr, (void*)phys_ptr, vault.scheduler.active()->paging_tree);
//TODO actually use loop
setMapping((uintptr_t)virt_addr, (void*)phys_ptr, vault.scheduler.active()->paging_tree->l4);
}
memcpy(virt_ptr, (void*)((uintptr_t)(virt_addr)+ (uintptr_t)offset), size);
setMapping((uintptr_t)(virt_addr), 0, search_table);
setMapping((uintptr_t)(virt_addr), 0, search_table->l4);
return true;
}
@@ -201,7 +202,7 @@ bool send(Vault& v, int pid, const void* sbuffer, size_t ssize, void* rbuffer, s
Thread* current_thread = v.scheduler.active();
Thread* target_thread = v.thread_list[pid];
uintptr_t sbuffer_ptr = isMapped((uintptr_t)sbuffer,v.scheduler.active()->paging_tree) + ((uintptr_t)sbuffer&0xFFF);
uintptr_t sbuffer_ptr = isMapped((uintptr_t)sbuffer,v.scheduler.active()->paging_tree->l4) + ((uintptr_t)sbuffer&0xFFF);
IpcStruct msg {
.ptr = sbuffer_ptr,
.size = ssize,
@@ -267,7 +268,7 @@ bool reply(Vault& v, const void* buffer, size_t size) {
Thread* other_thread = v.thread_list[ipc->pid];
if (other_thread == nullptr) return false;
uintptr_t phys_ptr = isMapped((uintptr_t)buffer, current_thread->paging_tree );
uintptr_t phys_ptr = isMapped((uintptr_t)buffer, current_thread->paging_tree->l4 );
ipc->ptr = phys_ptr + ((uintptr_t)buffer & 0xFFF);
ipc->size = size;
ipc->pid = current_thread->id;
@@ -280,19 +281,19 @@ bool reply(Vault& v, const void* buffer, size_t size) {
}
void copy_stack(Thread* parent, Thread* child){
uintptr_t dest_frame = isMapped((uintptr_t)child->StackPointer.user, child->paging_tree);
void* dest_vaddr = getFreeVirtSpace(parent->paging_tree, 1);
setMapping((uintptr_t)dest_vaddr, (void*)dest_frame, parent->paging_tree);
uintptr_t dest_frame = isMapped((uintptr_t)child->StackPointer.user, child->paging_tree->l4);
void* dest_vaddr = getFreeVirtSpace(parent->paging_tree->l4, 1);
setMapping((uintptr_t)dest_vaddr, (void*)dest_frame, parent->paging_tree->l4);
memcpy(dest_vaddr, (void*)0x6000000/*((uintptr_t)parent->StackPointer.user & ~0xFFF)*/, 4096);
setMapping((uintptr_t)dest_vaddr, 0, parent->paging_tree);
setMapping((uintptr_t)dest_vaddr, 0, parent->paging_tree->l4);
}
int fork(Vault &vault, InterruptContext *user_context) {
Thread* parent = vault.scheduler.active();
Thread* child = new Thread(false, (void*)user_context->ip, parent->code_paddr, parent->code_pagenum);
copy_pagetable(parent->paging_tree, child->paging_tree);
//copy_pagetable(parent->paging_tree, child->paging_tree);
copy_stack(parent, child);
child->StackPointer.user = (void*)user_context->sp;

View File

@@ -19,7 +19,7 @@
*/
class IdleThread : public Thread {
public:
explicit IdleThread() : Thread(true, 0) {}
explicit IdleThread() : Thread(true, 0, 0, 0) {}
/*! \brief Wait for a thread to become ready and sleep in the meantime.
*

View File

@@ -38,14 +38,14 @@ void Thread::map_app(void* code_paddr, uint16_t code_page_num, void* stack_vaddr
memset(paging_tree, 0, 4096);
create_basic_page_table(paging_tree, &identity_table);
// insert app code pages at 0x4000000
// app code pages at 0x4000000
for(uintptr_t i=0; i<(code_page_num*4096); i+=4096){
setMapping(0x4000000+i, (void*)((uintptr_t)code_paddr+i), paging_tree);
copy_page(paging_tree->l4, (uintptr_t)code_paddr+i, (void*)(0x4000000+i));
}
for(uintptr_t i=0; i<(stack_page_num*4096); i+=4096){
uintptr_t user_stackframe_p = ((uintptr_t)PageFrameAllocator::alloc(false));
setMapping((uintptr_t)(stack_vaddr)+i, (void*)((uintptr_t)user_stackframe_p), paging_tree);
setMapping((uintptr_t)(stack_vaddr)+i, (void*)((uintptr_t)user_stackframe_p), paging_tree->l4);
}
}