/*
 * set_new_page - allocate and install a fresh page frame into pgt[page_idx].
 *
 * NOTE(review): this view is fragmentary -- the original source lines between
 * the numbered statements below (32-35, 37-38, 40-43) are missing, including
 * the declaration and assignment of `page`. The asserts suggest the sequence:
 * allocate a frame, map it temporarily, then unmap -- confirm against the
 * full source before relying on this description.
 */
31 static inline void *set_new_page(
struct pgte *pgt,
int page_idx)
/* allocation result must be non-NULL (allocator call not visible here) */
36 assert(page &&
"Failed to allocate page");
/* temporary mapping of the new frame must have succeeded */
39 assert(page &&
"Failed to do page mapping");
/* the temporary mapping must have been torn down again */
44 assert(page &&
"Failed to do unmapping");
/*
 * update_rpgd - record a newly mapped page `_p` for virtual address `vma`
 * in the task's reverse/shadow page directory (rpgd).
 *
 * NOTE(review): fragmentary view -- declarations of `kvma`, `endptr`,
 * `kernel_start`, `page`, `tcb`, `arch_data`, `rpgd`, `pgt`, `pgt_idx`,
 * `page_idx`, `root_tcb`, `root_arch_data` and `pagesize` are on lines not
 * visible here. Descriptions below are inferred and must be confirmed.
 */
53 static inline void update_rpgd(
void *_p,
uint32_t vma)
/* kvma presumably comes from an environment/config string -- TODO confirm */
68 assert(kvma &&
"KVMA is not valid");
/* parse the kernel virtual-address base; base 16 */
69 kernel_start =
strtoul(kvma, &endptr, 16);
71 page = (
unsigned long)_p;
/*
 * NOTE(review): format-string mismatch -- `vma` is uint32_t and `page` /
 * `kernel_start` are unsigned long, but all three are printed with %p
 * (which expects void *). This is undefined behavior per the printf spec;
 * should be PRIx32 / %lx or casts to (void *).
 */
72 dbg_printf(
"Update vma: %p, page: %p (kvma:%p)\n", vma, page, kernel_start);
75 assert(tcb &&
"Invalid tcb");
78 assert(arch_data &&
"Invalid arch data");
/* split vma into page-directory index and page-table index */
80 TO_IDX(page, pgt_idx, page_idx);
81 rpgd = arch_data->
rpgd;
82 assert(rpgd &&
"rPGD is not valid");
/* kernel-space address: mirror the mapping into the root task's rPGD
 * so kernel mappings stay shared -- presumably; verify against caller */
84 if (kernel_start <= vma) {
86 struct pgde *root_rpgd;
90 assert(root_tcb &&
"root TCB is not valid");
93 root_rpgd = root_arch_data->
rpgd;
94 assert(root_rpgd &&
"rPGD is not valid");
/* no reverse page table yet for this directory slot: allocate one */
96 if (!root_rpgd[pgt_idx].p) {
98 assert(pgt &&
"Failed to allocate rpgt");
/* mark the new directory entry present and writable */
102 root_rpgd[pgt_idx].
p = 1;
103 root_rpgd[pgt_idx].
rw = 1;
/* entry already present: derive the table address from pt_base */
106 pgt = (
struct pgte *)(root_rpgd[pgt_idx].pt_base * pagesize);
108 }
/* user-space address with no table yet in this task's own rPGD */
else if (!rpgd[pgt_idx].p) {
110 assert(pgt &&
"Failed to allocate table");
116 rpgd[pgt_idx].
rw = 1;
/* the page slot must still be free before we install it */
119 assert(!pgt[page_idx].p &&
"rpgd, page exists");
122 pgt[page_idx].
rw = 1;
/*
 * set_new_pgt - install a new page table into pgd[pgt_idx], sharing
 * kernel-range tables with the root page directory where possible.
 *
 * NOTE(review): fragmentary view -- declarations of `kvma`, `endptr`,
 * `kernel_start`, `kpgt_idx`, `kpage_idx`, `tcb`, `arch_data` and `page`,
 * plus the allocation/mapping calls between the numbered lines, are not
 * visible here. Confirm all inferred behavior against the full source.
 */
126 static inline void set_new_pgt(
struct pgde *pgd,
int pgt_idx)
129 struct pgde *root_pgd;
139 assert(kvma &&
"kvma is not valid");
/* parse kernel base address and derive its directory/table indices */
141 kernel_start =
strtoul(kvma, &endptr, 16);
142 TO_IDX(kernel_start, kpgt_idx, kpage_idx);
145 assert(tcb &&
"TCB is not valid");
152 assert(arch_data &&
"arch_data is not valid");
/* root_pgd assignment is on a line not shown; the assert guards it */
155 assert(root_pgd &&
"failed to do onetime mapping");
/* kernel-range slot in a non-root pgd: reuse the root's table if present */
158 if (pgt_idx >= kpgt_idx && root_pgd != pgd) {
159 if (root_pgd[pgt_idx].p) {
/* root has no table either: allocate, map, populate, unmap (calls not
 * visible between these asserts) */
166 assert(page &&
"Failed to allocate");
169 assert(page &&
"Failed to do mapping");
174 assert(page &&
"Failed to do unmapping");
/* propagate the freshly created kernel table back into the root pgd */
181 if (pgt_idx >= kpgt_idx && root_pgd != pgd) {
182 if (!root_pgd[pgt_idx].p) {
183 root_pgd[pgt_idx].
p = 1;
184 root_pgd[pgt_idx].
rw = 1;
190 assert(root_pgd &&
"PGD is not valid");
/*
 * handle_write_error_for_page - resolve a write fault on a read-only page.
 *
 * Two paths are visible: either the existing page can simply be made
 * writable (line 207), or a new frame is allocated and -- presumably --
 * the contents are copied before remapping (copy-on-write; the copy
 * itself is on lines not visible here -- TODO confirm).
 *
 * NOTE(review): declarations of `page` and `new_page` and the calls
 * between the numbered asserts are missing from this view.
 */
193 static inline void handle_write_error_for_page(
struct pgte *pgt,
int page_idx)
199 assert(page &&
"page is not valid");
/* fast path: no sharing detected (check not visible), just set writable */
207 pgt[page_idx].
rw = 1;
209 assert(pgt &&
"Failed to do onetime unmapping");
/* slow path: allocate a private frame for the faulting task */
222 assert(new_page &&
"Failed to allocate a page frame");
225 assert(new_page &&
"Failed to do onetime mapping");
228 assert(page &&
"Failed to do onetime mapping");
/* tear down the temporary mappings of both old and new frames */
233 assert(new_page &&
"Failed to do unmapping");
236 assert(page &&
"Failed to do unmapping");
/* the (now private) page entry becomes writable */
238 pgt[page_idx].
rw = 1;
/*
 * handle_write_error_for_table - resolve a write fault caused by a
 * read-only page-directory entry, apparently by duplicating the page
 * table (`new_pgt`) for the faulting task -- the copy and the pgd update
 * are on lines not visible here; TODO confirm against the full source.
 *
 * NOTE(review): the fast-path/slow-path branch structure and the
 * declarations of the mapped pointers are omitted from this view.
 */
242 static inline void handle_write_error_for_table(
struct pgde *pgd,
struct pgte *pgt,
int pgt_idx)
244 struct pgte *new_pgt;
/* temporarily map the existing table so it can be inspected/copied */
250 assert(pgt &&
"Failed to do onetime mapping");
257 assert(pgt &&
"Failed to do unmapping");
/* allocate a private table for the faulting task */
264 assert(new_pgt &&
"Failed to allocate a page frame");
267 assert(new_pgt &&
"Failed to do onetime mapping");
270 assert(pgt &&
"Failed to onetime mapping");
/* release the temporary mappings of both tables */
278 assert(new_pgt &&
"Failed to do unmapping");
281 assert(pgt &&
"Failed to do unmapping");
/*
 * NOTE(review): interior of an enclosing function whose signature is on an
 * omitted line (original lines ~282-299 are missing from this view). The
 * CR2 read below indicates this is an x86 page-fault handler: it fetches
 * the faulting virtual address, then dispatches to set_new_pgt /
 * handle_write_error_for_table for directory-level faults and to
 * set_new_page / handle_write_error_for_page for page-level faults.
 * Declarations of `tcb`, `reg`, `arch_data`, `pgd`, `pgt`, `fault_vma`,
 * `pgt_idx`, `page_idx` and `page` are not visible here.
 */
300 assert(tcb &&
"Invalid TCB");
303 assert(reg &&
"Invalid register");
306 assert(arch_data &&
"Invalid arch data");
309 assert(pgd &&
"Failed to do onetime mapping");
/* read CR2, the faulting linear address; "=a" pins the output to EAX */
311 asm volatile (
"mov %%cr2, %0":
"=a"(fault_vma):);
/* split the fault address into directory and table indices */
313 TO_IDX(fault_vma, pgt_idx, page_idx);
/* directory entry absent: demand-allocate a page table */
315 if (!pgd[pgt_idx].p) {
317 set_new_pgt(pgd, pgt_idx);
318 }
/* directory entry present but read-only: write fault on the table */
else if (!pgd[pgt_idx].rw) {
/* NOTE(review): typo in panic string -- "Unhnadled" should be
 * "Unhandled" (cannot be fixed in a comments-only edit) */
322 panic(
"Unhnadled exception");
326 handle_write_error_for_table(pgd, pgt, pgt_idx);
333 assert(pgt &&
"pgt is not valid");
/* page entry present: must be a write-permission fault */
335 if (pgt[page_idx].p) {
338 panic(
"Unhandled exception");
343 handle_write_error_for_page(pgt, page_idx);
/* page entry absent: demand-allocate the page and record it */
359 page = set_new_page(pgt, page_idx);
360 update_rpgd(page, fault_vma);
364 assert(pgt &&
"pgt is not valid");