/* vim: tabstop=4 shiftwidth=4 noexpandtab
 * This file is part of ToaruOS and is released under the terms
 * of the NCSA / University of Illinois License - see LICENSE.md
 * Copyright (C) 2011-2014 Kevin Lange
 * Copyright (C) 2012 Markus Schober
 *
 * Task Switching and Management Functions
 *
 */
#include <system.h>
#include <process.h>
#include <logging.h>
#include <shm.h>
#include <mem.h>

#define TASK_MAGIC 0xDEADBEEF

uint32_t next_pid = 0;

#define PUSH(stack, type, item) stack -= sizeof(type); \
	*((type *) stack) = item
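/* PUSH moves the stack pointer down by sizeof(type) and stores `item` at the
 * new top, i.e. a push onto a downward-growing stack. It expands to two
 * statements (no do/while wrapper), so it is only used as a standalone
 * statement below. */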
page_directory_t *kernel_directory;
page_directory_t *current_directory;

/*
 * Clone a page directory and its contents.
 * (If you do not intend to clone the contents, do it yourself!)
 *
 * @param src Pointer to source directory to clone from.
 * @return A pointer to a new directory.
 */
page_directory_t *
clone_directory(
		page_directory_t * src
		) {
	/* Allocate a new page directory */
	uintptr_t phys;
	page_directory_t * dir = (page_directory_t *)kvmalloc_p(sizeof(page_directory_t), &phys);
	/* Clear it out */
	memset(dir, 0, sizeof(page_directory_t));
	dir->ref_count = 1;

	/* And store it... */
	dir->physical_address = phys;

	uint32_t i;
	for (i = 0; i < 1024; ++i) {
		/* Copy each table */
		if (!src->tables[i] || (uintptr_t)src->tables[i] == (uintptr_t)0xFFFFFFFF) {
			continue;
		}
		if (kernel_directory->tables[i] == src->tables[i]) {
			/* Kernel tables are simply linked together */
			dir->tables[i] = src->tables[i];
			dir->physical_tables[i] = src->physical_tables[i];
		} else {
			if (i * 0x1000 * 1024 < SHM_START) {
				/* User tables must be cloned */
				uintptr_t phys;
				dir->tables[i] = clone_table(src->tables[i], &phys);
				dir->physical_tables[i] = phys | 0x07;
			}
		}
	}
	return dir;
}
/*
 * Free a directory and its tables
 */
void release_directory(page_directory_t * dir) {
	dir->ref_count--;

	if (dir->ref_count < 1) {
		uint32_t i;
		for (i = 0; i < 1024; ++i) {
			if (!dir->tables[i] || (uintptr_t)dir->tables[i] == (uintptr_t)0xFFFFFFFF) {
				continue;
			}
			if (kernel_directory->tables[i] != dir->tables[i]) {
				if (i * 0x1000 * 1024 < SHM_START) {
					for (uint32_t j = 0; j < 1024; ++j) {
						if (dir->tables[i]->pages[j].frame) {
							free_frame(&(dir->tables[i]->pages[j]));
						}
					}
				}
				free(dir->tables[i]);
			}
		}
		free(dir);
	}
}

void release_directory_for_exec(page_directory_t * dir) {
	uint32_t i;
	/* This better be the only owner of this directory... */
	for (i = 0; i < 1024; ++i) {
		if (!dir->tables[i] || (uintptr_t)dir->tables[i] == (uintptr_t)0xFFFFFFFF) {
			continue;
		}
		if (kernel_directory->tables[i] != dir->tables[i]) {
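			/* Only mappings below USER_STACK_BOTTOM are torn down here,
			 * presumably so the user stack region survives the exec;
			 * the rationale is inferred, not documented. */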
			if (i * 0x1000 * 1024 < USER_STACK_BOTTOM) {
				for (uint32_t j = 0; j < 1024; ++j) {
					if (dir->tables[i]->pages[j].frame) {
						free_frame(&(dir->tables[i]->pages[j]));
					}
				}
				dir->physical_tables[i] = 0;
				free(dir->tables[i]);
				dir->tables[i] = 0;
			}
		}
	}
}

extern char * default_name;

/*
 * Clone a page table
 *
 * @param src Pointer to a page table to clone.
 * @param physAddr [out] Pointer to the physical address of the new page table
 * @return A pointer to a new page table.
 */
page_table_t *
clone_table(
		page_table_t * src,
		uintptr_t * physAddr
		) {
	/* Allocate a new page table */
	page_table_t * table = (page_table_t *)kvmalloc_p(sizeof(page_table_t), physAddr);
	memset(table, 0, sizeof(page_table_t));

	uint32_t i;
	for (i = 0; i < 1024; ++i) {
		/* For each frame in the table... */
		if (!src->pages[i].frame) {
			continue;
		}
		/* Allocate a new frame */
		alloc_frame(&table->pages[i], 0, 0);
		/* Set the correct access bit */
		if (src->pages[i].present)  table->pages[i].present = 1;
		if (src->pages[i].rw)       table->pages[i].rw = 1;
		if (src->pages[i].user)     table->pages[i].user = 1;
		if (src->pages[i].accessed) table->pages[i].accessed = 1;
		if (src->pages[i].dirty)    table->pages[i].dirty = 1;
		/* Copy the contents of the page from the old table to the new one */
		copy_page_physical(src->pages[i].frame * 0x1000, table->pages[i].frame * 0x1000);
	}
	return table;
}
uintptr_t frozen_stack = 0;

/*
 * Install multitasking functionality.
 */
void tasking_install(void) {
	IRQ_OFF; /* Disable interrupts */

	debug_print(NOTICE, "Initializing multitasking");

	/* Initialize the process tree */
	initialize_process_tree();
	/* Spawn the initial process */
	current_process = spawn_init();
	kernel_idle_task = spawn_kidle();
	/* Initialize the paging environment */
#if 0
	set_process_environment((process_t *)current_process, current_directory);
#endif
	/* Switch to the kernel directory */
	switch_page_directory(current_process->thread.page_directory);

	frozen_stack = (uintptr_t)valloc(KERNEL_STACK_SIZE);

	/* Reenable interrupts */
	IRQ_RES;
}
/*
 * Fork.
 *
 * @return To the parent: PID of the child; to the child: 0
 */
uint32_t fork(void) {
	IRQ_OFF;

	uintptr_t esp, ebp;

	current_process->syscall_registers->eax = 0;

	/* Make a pointer to the parent process (us) on the stack */
	process_t * parent = (process_t *)current_process;
	assert(parent && "Forked from nothing??");
	/* Clone the current process' page directory */
	page_directory_t * directory = clone_directory(current_directory);
	assert(directory && "Could not allocate a new page directory!");
	/* Spawn a new process from this one */
	debug_print(INFO,"\033[1;32mALLOC {\033[0m");
	process_t * new_proc = spawn_process(current_process);
	debug_print(INFO,"\033[1;32m}\033[0m");
	assert(new_proc && "Could not allocate a new process!");
	/* Set the new process' page directory to clone */
	set_process_environment(new_proc, directory);

	struct regs r;
	memcpy(&r, current_process->syscall_registers, sizeof(struct regs));
	new_proc->syscall_registers = &r;

	esp = new_proc->image.stack;
	ebp = esp;

	new_proc->syscall_registers->eax = 0;
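	/* Seed the child's kernel stack with a copy of the parent's syscall
	 * register frame (eax already zeroed above) and point its saved eip at
	 * return_to_userspace, so that once scheduled the child restores that
	 * frame and resumes in userspace as if its own fork() had returned 0. */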
	PUSH(esp, struct regs, r);

	new_proc->thread.esp = esp;
	new_proc->thread.ebp = ebp;

	new_proc->is_tasklet = parent->is_tasklet;

	new_proc->thread.eip = (uintptr_t)&return_to_userspace;

	/* Add the new process to the ready queue */
	make_process_ready(new_proc);

	IRQ_RES;

	/* Return the child PID */
	return new_proc->id;
}
int create_kernel_tasklet(tasklet_t tasklet, char * name, void * argp) {
	IRQ_OFF;

	uintptr_t esp, ebp;

	if (current_process->syscall_registers) {
		current_process->syscall_registers->eax = 0;
	}

	page_directory_t * directory = kernel_directory;
	/* Spawn a new process from this one */
	process_t * new_proc = spawn_process(current_process);
	assert(new_proc && "Could not allocate a new process!");
	/* Set the new process' page directory to the original process' */
	set_process_environment(new_proc, directory);
	directory->ref_count++;

	/* Read the instruction pointer */
	if (current_process->syscall_registers) {
		struct regs r;
		memcpy(&r, current_process->syscall_registers, sizeof(struct regs));
		new_proc->syscall_registers = &r;
	}

	esp = new_proc->image.stack;
	ebp = esp;

	if (current_process->syscall_registers) {
		new_proc->syscall_registers->eax = 0;
	}

	new_proc->is_tasklet = 1;
	new_proc->name = name;
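	/* Build the tasklet's initial kernel stack: from the tasklet's point of
	 * view, argp and name are its two arguments and task_exit is its return
	 * address, so simply returning from the tasklet terminates it. */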
	PUSH(esp, uintptr_t, (uintptr_t)name);
	PUSH(esp, uintptr_t, (uintptr_t)argp);
	PUSH(esp, uintptr_t, (uintptr_t)&task_exit);

	new_proc->thread.esp = esp;
	new_proc->thread.ebp = ebp;

	new_proc->thread.eip = (uintptr_t)tasklet;

	/* Add the new process to the ready queue */
	make_process_ready(new_proc);

	IRQ_RES;

	/* Return the child PID */
	return new_proc->id;
}
/*
 * Clone the current thread and create a new one in the same
 * memory space with the given pointer as its new stack.
 */
uint32_t
clone(uintptr_t new_stack, uintptr_t thread_func, uintptr_t arg) {
	uintptr_t esp, ebp;

	IRQ_OFF;

	current_process->syscall_registers->eax = 0;

	/* Make a pointer to the parent process (us) on the stack */
	process_t * parent = (process_t *)current_process;
	assert(parent && "Cloned from nothing??");
	page_directory_t * directory = current_directory;
	/* Spawn a new process from this one */
	process_t * new_proc = spawn_process(current_process);
	assert(new_proc && "Could not allocate a new process!");
	/* Set the new process' page directory to the original process' */
	set_process_environment(new_proc, directory);
	directory->ref_count++;

	/* Read the instruction pointer */
	struct regs r;
	memcpy(&r, current_process->syscall_registers, sizeof(struct regs));
	new_proc->syscall_registers = &r;

	esp = new_proc->image.stack;
	ebp = esp;

	/* Set the gid */
	if (current_process->group) {
		new_proc->group = current_process->group;
	} else {
		/* We are the session leader */
		new_proc->group = current_process->id;
	}

	new_proc->syscall_registers->ebp = new_stack;
	new_proc->syscall_registers->eip = thread_func;

	/* Push arg, bogus return address onto the new thread's stack */
	new_stack -= sizeof(uintptr_t);
	*((uintptr_t *)new_stack) = arg;
	new_stack -= sizeof(uintptr_t);
	*((uintptr_t *)new_stack) = THREAD_RETURN;
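	/* The bogus return address means that when thread_func returns it jumps
	 * to THREAD_RETURN, presumably a sentinel/trampoline that ends the
	 * thread rather than letting it run off into unmapped memory. */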
	/* Set esp, ebp, and eip for the new thread */
	new_proc->syscall_registers->esp = new_stack;
	new_proc->syscall_registers->useresp = new_stack;

	PUSH(esp, struct regs, r);

	new_proc->thread.esp = esp;
	new_proc->thread.ebp = ebp;

	new_proc->is_tasklet = parent->is_tasklet;

	free(new_proc->fds);
	new_proc->fds = current_process->fds;
	new_proc->fds->refs++;

	new_proc->thread.eip = (uintptr_t)&return_to_userspace;

	/* Add the new process to the ready queue */
	make_process_ready(new_proc);

	IRQ_RES;

	/* Return the child PID */
	return new_proc->id;
}
/*
 * Get the process ID of the current process.
 *
 * @return The PID of the current process.
 */
uint32_t getpid(void) {
	/* Fairly self-explanatory. */
	return current_process->id;
}
/*
 * Switch to the next ready task.
 *
 * This is called from the interrupt handler for the interval timer to
 * perform standard task switching.
 */
void switch_task(uint8_t reschedule) {
	if (!current_process) {
		/* Tasking is not yet installed. */
		return;
	}
	if (!current_process->running) {
		switch_next();
	}

	/* Collect the current kernel stack and instruction pointers */
	uintptr_t esp, ebp, eip;
	asm volatile ("mov %%esp, %0" : "=r" (esp));
	asm volatile ("mov %%ebp, %0" : "=r" (ebp));
	eip = read_eip();
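	/* read_eip() returns twice: once here with the real return address, and
	 * "again" when this task is later resumed by switch_next(), whose jump
	 * preloads eax with the sentinel 0x10000 before re-entering at eip. */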
	if (eip == 0x10000) {
		/* Returned from EIP after task switch, we have
		 * finished switching. */
		fix_signal_stacks();

		/* XXX: Signals */
		if (!current_process->finished) {
			if (current_process->signal_queue->length > 0) {
				node_t * node = list_dequeue(current_process->signal_queue);
				signal_t * sig = node->value;
				free(node);
				handle_signal((process_t *)current_process, sig);
			}
		}

		return;
	}

	/* Remember this process' ESP/EBP/EIP */
	current_process->thread.eip = eip;
	current_process->thread.esp = esp;
	current_process->thread.ebp = ebp;
	current_process->running = 0;

	/* Save floating point state */
	switch_fpu();

	if (reschedule && current_process != kernel_idle_task) {
		/* And reinsert it into the ready queue */
		make_process_ready((process_t *)current_process);
	}

	/* Switch to the next task */
	switch_next();
}
/*
 * Immediately switch to the next task.
 *
 * Does not store the ESP/EBP/EIP of the current thread.
 */
void switch_next(void) {
	uintptr_t esp, ebp, eip;

	/* Get the next available process */
	current_process = next_ready_process();
	/* Retrieve the ESP/EBP/EIP */
	eip = current_process->thread.eip;
	esp = current_process->thread.esp;
	ebp = current_process->thread.ebp;

	/* Validate */
	if ((eip < (uintptr_t)&code) || (eip > (uintptr_t)heap_end)) {
		debug_print(WARNING, "Skipping broken process %d! [eip=0x%x <0x%x or >0x%x]", current_process->id, eip, &code, &end);
		switch_next();
	}

	if (current_process->finished) {
		debug_print(WARNING, "Tried to switch to process %d, but it claims it is finished.", current_process->id);
		switch_next();
	}

	/* Set the page directory */
	current_directory = current_process->thread.page_directory;
	switch_page_directory(current_directory);
	/* Set the kernel stack in the TSS */
	set_kernel_stack(current_process->image.stack);

	if (current_process->started) {
		if (!current_process->signal_kstack) {
			if (current_process->signal_queue->length > 0) {
				current_process->signal_kstack = malloc(KERNEL_STACK_SIZE);
				current_process->signal_state.esp = current_process->thread.esp;
				current_process->signal_state.eip = current_process->thread.eip;
				current_process->signal_state.ebp = current_process->thread.ebp;
				memcpy(current_process->signal_kstack, (void *)(current_process->image.stack - KERNEL_STACK_SIZE), KERNEL_STACK_SIZE);
			}
		}
	} else {
		current_process->started = 1;
	}

	current_process->running = 1;

	/* Jump, baby, jump */
	asm volatile (
			"mov %0, %%ebx\n"
			"mov %1, %%esp\n"
			"mov %2, %%ebp\n"
			"mov %3, %%cr3\n"
			"mov $0x10000, %%eax\n" /* read_eip() will return 0x10000 */
			"jmp *%%ebx"
			: : "r" (eip), "r" (esp), "r" (ebp), "r" (current_directory->physical_address)
			: "%ebx", "%esp", "%eax");
}
extern void enter_userspace(uintptr_t location, uintptr_t stack);

/*
 * Enter ring 3 and jump to `location`.
 *
 * @param location Address to jump to in user space
 * @param argc Argument count
 * @param argv Argument pointers
 * @param stack Userspace stack address
 */
void
enter_user_jmp(uintptr_t location, int argc, char ** argv, uintptr_t stack) {
	IRQ_OFF;
	set_kernel_stack(current_process->image.stack);
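	/* Push argv, then argc, so argc ends up on top of the user stack with
	 * argv immediately below it, presumably the layout the userspace entry
	 * code at `location` expects. */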
	PUSH(stack, uintptr_t, (uintptr_t)argv);
	PUSH(stack, int, argc);
	enter_userspace(location, stack);
}
/*
 * Dequeue the current task and set it as finished
 *
 * @param retval Set the return value to this.
 */
void task_exit(int retval) {
	/* Free the image memory */
	if (__builtin_expect(current_process->id == 0,0)) {
		/* This is probably bad... */
		switch_next();
		return;
	}
	cleanup_process((process_t *)current_process, retval);

	process_t * parent = process_get_parent((process_t *)current_process);

	if (parent) {
		wakeup_queue(parent->wait_queue);
	}

	switch_next();
}

/*
 * Call task_exit() and immediately STOP if we can't.
 */
void kexit(int retval) {
	task_exit(retval);
	debug_print(CRITICAL, "Process returned from task_exit! Environment is definitely unclean. Stopping.");
	STOP;
}