nckernel 0.1
page_fault_handler.c
#include <sys/types.h>
#include <stdio.h>
#include <time.h>
#include <semaphore.h>
#include <assert.h>
#include <pthread.h>
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>

#include <object.h>
#include <list.h>
#include <thread.h>
#include <paging.h> /* cr3 */
#include <isr.h> /* ecode */

#include <segment.h> /* For the IO_BITMAP_SIZE */
#include <x86.h> /* x86_arch_data */
#include <arch.h> /* halt */
#include <initm.h>

#include <debug.h>

#include <zone.h>
#include <page_frame.h>

#include <onetime_map.h>

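/*
 * NOTE: struct pgde/pgte, TO_IDX() and NR_PAGES come from <paging.h> and
 * are not part of this file. Judging by how they are used below, and
 * assuming the standard 32-bit x86 two-level paging layout, they
 * presumably look roughly like this sketch (not the real header):
 *
 *   struct pgte {
 *           uint32_t p:1;          // present
 *           uint32_t rw:1;         // writable
 *           uint32_t flags:10;     // remaining flag bits (layout assumed)
 *           uint32_t page_base:20; // physical frame number (addr >> 12)
 *   };
 *   // struct pgde has the same shape, with pt_base:20 for the table frame.
 *
 *   #define NR_PAGES 1024          // PTEs per table (assumed)
 *   // 10-bit directory index, 10-bit table index:
 *   #define TO_IDX(addr, pgt_idx, page_idx) do {                 \
 *           (pgt_idx)  = ((uint32_t)(addr)) >> 22;               \
 *           (page_idx) = (((uint32_t)(addr)) >> 12) & 0x3ff;     \
 *   } while (0)
 */
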
static inline void *set_new_page(struct pgte *pgt, int page_idx)
{
	void *page;

	page = page_frame_alloc(ZONE_NORMAL, 1);
	assert(page && "Failed to allocate page");

	page = onetime_map(page);
	assert(page && "Failed to do page mapping");

	memset(page, 0, sysconf(_SC_PAGESIZE));

	page = onetime_unmap(page);
	assert(page && "Failed to do unmapping");

	pgt[page_idx].p = 1;
	pgt[page_idx].rw = 1;
	pgt[page_idx].page_base = ((uint32_t)page) / sysconf(_SC_PAGESIZE);

	return page;
}

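/*
 * onetime_map()/onetime_unmap() (from <onetime_map.h>) appear to follow
 * this contract, inferred from how set_new_page() uses them; the
 * prototypes below are a sketch of the assumed semantics, not the real
 * header:
 *
 *   void *onetime_map(void *phys);   // map a physical frame at a
 *                                    // temporary VA, return that VA
 *   void *onetime_unmap(void *virt); // tear the mapping down, return
 *                                    // the original physical address
 *
 * This is why set_new_page() can zero the frame through the mapped
 * pointer, yet still derive a physical frame number for page_base and
 * return a physical address after the unmap.
 */
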
static inline void update_rpgd(void *_p, uint32_t vma)
{
	struct thread *tcb;
	struct x86_arch_data *arch_data;
	int pgt_idx;
	int page_idx;
	struct pgte *pgt;
	struct pgde *rpgd;
	unsigned long page;
	const unsigned long pagesize = sysconf(_SC_PAGESIZE);
	uint32_t kernel_start;
	const char *kvma;
	char *endptr;

	kvma = getenv("kvma");
	assert(kvma && "KVMA is not valid");
	kernel_start = strtoul(kvma, &endptr, 16);

	page = (unsigned long)_p;
	dbg_printf("Update vma: %#x, page: %#lx (kvma: %#x)\n",
		   vma, page, kernel_start);

	tcb = (struct thread *)pthread_self();
	assert(tcb && "Invalid tcb");

	arch_data = (struct x86_arch_data *)tcb->arch_data;
	assert(arch_data && "Invalid arch data");

	TO_IDX(page, pgt_idx, page_idx);
	rpgd = arch_data->rpgd;
	assert(rpgd && "rPGD is not valid");

	if (kernel_start <= vma) {
		struct thread *root_tcb;
		struct pgde *root_rpgd;
		struct x86_arch_data *root_arch_data;

		root_tcb = tcb->root;
		assert(root_tcb && "root TCB is not valid");

		root_arch_data = root_tcb->arch_data;
		root_rpgd = root_arch_data->rpgd;
		assert(root_rpgd && "rPGD is not valid");

		if (!root_rpgd[pgt_idx].p) {
			pgt = malloc(pagesize);
			assert(pgt && "Failed to allocate rpgt");

			memset(pgt, 0, pagesize);

			root_rpgd[pgt_idx].p = 1;
			root_rpgd[pgt_idx].rw = 1;
			root_rpgd[pgt_idx].pt_base = (uint32_t)pgt / pagesize;
		} else {
			pgt = (struct pgte *)(root_rpgd[pgt_idx].pt_base * pagesize);
		}
	} else if (!rpgd[pgt_idx].p) {
		pgt = malloc(pagesize);
		assert(pgt && "Failed to allocate table");

		memset(pgt, 0, pagesize);
	} else {
		/* Table already present: reuse it so pgt is never left
		 * uninitialized (mirrors the root_rpgd branch above). */
		pgt = (struct pgte *)(rpgd[pgt_idx].pt_base * pagesize);
	}

	rpgd[pgt_idx].p = 1;
	rpgd[pgt_idx].rw = 1;
	rpgd[pgt_idx].pt_base = (uint32_t)pgt / pagesize;

	assert(!pgt[page_idx].p && "rpgd, page exists");

	pgt[page_idx].p = 1;
	pgt[page_idx].rw = 1;
	pgt[page_idx].page_base = (uint32_t)vma / pagesize;
}

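/*
 * update_rpgd() maintains a *reverse* page directory: it is indexed by
 * the physical page address and stores the faulting VMA, inverting the
 * usual VA -> PA lookup. A worked example with the assumed 10/10/12
 * address split (addresses are illustrative only):
 *
 *   physical page _p = 0x00345000
 *     pgt_idx  = 0x00345000 >> 22           = 0x000 (rpgd slot)
 *     page_idx = (0x00345000 >> 12) & 0x3ff = 0x345 (rpgt slot)
 *   faulting vma = 0x08048000
 *     pgt[0x345].page_base = 0x08048000 / 4096 = 0x8048
 */
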
static inline void set_new_pgt(struct pgde *pgd, int pgt_idx)
{
	struct x86_arch_data *arch_data;
	struct pgde *root_pgd;
	uint32_t kernel_start;
	struct thread *tcb;
	const char *kvma;
	int kpage_idx;
	int kpgt_idx;
	char *endptr;
	void *page;

	kvma = getenv("kvma");
	assert(kvma && "kvma is not valid");

	kernel_start = strtoul(kvma, &endptr, 16);
	TO_IDX(kernel_start, kpgt_idx, kpage_idx);

	tcb = (struct thread *)pthread_self();
	assert(tcb && "TCB is not valid");

	if (tcb->root) {
		tcb = tcb->root;
	}

	arch_data = tcb->arch_data;
	assert(arch_data && "arch_data is not valid");

	root_pgd = onetime_map(arch_data->pma_pgd);
	assert(root_pgd && "failed to do onetime mapping");

	page = NULL;
	if (pgt_idx >= kpgt_idx && root_pgd != pgd) {
		if (root_pgd[pgt_idx].p) {
			page = (void *)(root_pgd[pgt_idx].pt_base * sysconf(_SC_PAGESIZE));
		}
	}

	if (!page) {
		page = page_frame_alloc(ZONE_NORMAL, 1);
		assert(page && "Failed to allocate");

		page = onetime_map(page);
		assert(page && "Failed to do mapping");

		memset(page, 0, sysconf(_SC_PAGESIZE));

		page = onetime_unmap(page);
		assert(page && "Failed to do unmapping");
	}

	pgd[pgt_idx].p = 1;
	pgd[pgt_idx].rw = 1;
	pgd[pgt_idx].pt_base = ((uint32_t)page) / sysconf(_SC_PAGESIZE);

	if (pgt_idx >= kpgt_idx && root_pgd != pgd) {
		if (!root_pgd[pgt_idx].p) {
			root_pgd[pgt_idx].p = 1;
			root_pgd[pgt_idx].rw = 1;
			root_pgd[pgt_idx].pt_base = pgd[pgt_idx].pt_base;
		}
	}

	root_pgd = onetime_unmap(root_pgd);
	assert(root_pgd && "PGD is not valid");
}

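/*
 * Kernel-space page tables (pgt_idx >= kpgt_idx) are shared across
 * address spaces: a non-root PGD first tries to reuse the root thread's
 * table, and when it has to allocate a fresh one it publishes it back
 * into the root PGD. In outline (a sketch of the intent above, not
 * additional code):
 *
 *   if (pgt_idx >= kpgt_idx && root_pgd[pgt_idx].p)
 *           reuse root_pgd[pgt_idx];       // share the existing table
 *   else
 *           allocate and zero a new one;   // then mirror it into root_pgd
 */
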
static inline void handle_write_error_for_page(struct pgte *pgt, int page_idx)
{
	void *new_page;
	void *page;

	page = (void *)(pgt[page_idx].page_base * sysconf(_SC_PAGESIZE));
	assert(page && "page is not valid");

	if (page_frame_refcnt(page) == 1) {
		/* Sole owner: just restore write permission. The caller
		 * unmaps pgt, so do not unmap it here. */
		pgt[page_idx].rw = 1;
		return;
	}

	/* Shared frame: drop our reference (refcnt > 1 keeps it alive)
	 * and copy the contents into a private frame. */
	page_frame_free(page);

	new_page = page_frame_alloc(ZONE_NORMAL, 1);
	assert(new_page && "Failed to allocate a page frame");

	new_page = onetime_map(new_page);
	assert(new_page && "Failed to do onetime mapping");

	page = onetime_map(page);
	assert(page && "Failed to do onetime mapping");

	memcpy(new_page, page, sysconf(_SC_PAGESIZE));

	new_page = onetime_unmap(new_page);
	assert(new_page && "Failed to do unmapping");

	page = onetime_unmap(page);
	assert(page && "Failed to do unmapping");

	pgt[page_idx].rw = 1;
	pgt[page_idx].page_base = (unsigned long)new_page / sysconf(_SC_PAGESIZE);
}

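/*
 * This is the page-level half of copy-on-write. Note the ordering:
 * page_frame_free() only drops this address space's reference (refcnt
 * was > 1 on this path, so the frame stays alive), after which the
 * contents are copied into a private frame. The sharing side that sets
 * this situation up presumably looks like the sketch below; share_page()
 * is a hypothetical helper, not part of this file:
 *
 *   static void share_page(struct pgte *pgt, int idx)
 *   {
 *           // bump the frame's refcount here (API assumed), then
 *           // force a fault on the next write:
 *           pgt[idx].rw = 0;
 *   }
 */
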
static inline void handle_write_error_for_table(struct pgde *pgd, struct pgte *pgt, int pgt_idx)
{
	struct pgte *new_pgt;
	register int i;

	/* Read-only table but trying to write */
	if (page_frame_refcnt(pgt) == 1) {
		/* Sole owner: keep the table, demote its pages to
		 * read-only so page-level COW still triggers. */
		pgt = onetime_map(pgt);
		assert(pgt && "Failed to do onetime mapping");

		for (i = 0; i < NR_PAGES; i++) {
			pgt[i].rw = 0;
		}

		pgt = onetime_unmap(pgt);
		assert(pgt && "Failed to do unmapping");

		pgd[pgt_idx].rw = 1;
		return;
	}

	new_pgt = page_frame_alloc(ZONE_NORMAL, 1);
	assert(new_pgt && "Failed to allocate a page frame");

	new_pgt = onetime_map(new_pgt);
	assert(new_pgt && "Failed to do onetime mapping");

	pgt = onetime_map(pgt);
	assert(pgt && "Failed to do onetime mapping");

	memcpy(new_pgt, pgt, sysconf(_SC_PAGESIZE));
	for (i = 0; i < NR_PAGES; i++) {
		new_pgt[i].rw = 0;
	}

	new_pgt = onetime_unmap(new_pgt);
	assert(new_pgt && "Failed to do unmapping");

	pgt = onetime_unmap(pgt);
	assert(pgt && "Failed to do unmapping");

	pgd[pgt_idx].rw = 1;
	pgd[pgt_idx].pt_base = ((unsigned long)new_pgt / sysconf(_SC_PAGESIZE));

	page_frame_free(pgt);
}

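/*
 * Table-level and page-level COW compose into a two-fault sequence for
 * a write to a shared, present page:
 *
 *   fault 1: the pgd entry is read-only -> handle_write_error_for_table()
 *            copies (or reclaims) the page table, demotes every PTE in
 *            it to read-only, and makes the pgd entry writable;
 *   fault 2: the retried write now hits a read-only PTE ->
 *            handle_write_error_for_page() copies (or reclaims) the page
 *            itself and sets pgt[page_idx].rw = 1.
 */
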
int page_fault_handler(int sub_irq, void *info, void *data)
{
	uint32_t fault_vma;
	struct pgde *pgd;
	int pgt_idx;
	int page_idx;
	struct x86_arch_data *arch_data;
	struct thread *tcb;
	struct pt_regs *reg;

	tcb = (struct thread *)pthread_self();
	assert(tcb && "Invalid TCB");

	reg = (struct pt_regs *)tcb->esp;
	assert(reg && "Invalid register");

	arch_data = tcb->arch_data;
	assert(arch_data && "Invalid arch data");

	pgd = onetime_map(arch_data->pma_pgd);
	assert(pgd && "Failed to do onetime mapping");

	/* CR2 holds the faulting linear address */
	asm volatile ("mov %%cr2, %0" : "=a"(fault_vma) :);

	TO_IDX(fault_vma, pgt_idx, page_idx);

	if (!pgd[pgt_idx].p) {
		assert(!reg->ecode.page_fault.present && "Page already exists");
		set_new_pgt(pgd, pgt_idx);
	} else if (!pgd[pgt_idx].rw) {
		struct pgte *pgt;

		if (!reg->ecode.page_fault.write) {
			panic("Unhandled exception");
		}

		pgt = (struct pgte *)(pgd[pgt_idx].pt_base * sysconf(_SC_PAGESIZE));
		handle_write_error_for_table(pgd, pgt, pgt_idx);
	} else {
		struct pgte *pgt;

		pgt = (struct pgte *)(pgd[pgt_idx].pt_base * sysconf(_SC_PAGESIZE));

		pgt = onetime_map(pgt);
		assert(pgt && "pgt is not valid");

		if (pgt[page_idx].p) {
			/* NOTE: Read/Write permission error check */
			if (pgt[page_idx].rw || !reg->ecode.page_fault.write) {
				panic("Unhandled exception");
			}

			dbg_printf("Write fault %#x (%#x)\n", fault_vma, reg->eip);

			handle_write_error_for_page(pgt, page_idx);
		} else {
			void *page;

			assert(!reg->ecode.page_fault.present && "page is present");

			page = set_new_page(pgt, page_idx);
			update_rpgd(page, fault_vma);
		}

		pgt = onetime_unmap(pgt);
		assert(pgt && "pgt is not valid");
	}

	pgd = onetime_unmap(pgd);
	return 0;
}

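/*
 * page_fault_handler() matches the (sub_irq, info, data) handler shape
 * used by the ISR layer, so it is presumably registered against the x86
 * page-fault vector (14) somewhere along these lines. The call below is
 * hypothetical; the actual registration API lives in <isr.h> and may
 * differ:
 *
 *   register_isr_handler(14, page_fault_handler, NULL);
 */
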
/* End of file */