// OS/pintos-env/pintos/threads/thread.c


// Author: Claudio Maggioni
// Author: Tommaso Rodolfo Masera
#include "threads/thread.h"
#include <debug.h>
#include <stddef.h>
#include <random.h>
#include <stdio.h>
#include <string.h>
#include "threads/flags.h"
#include "threads/interrupt.h"
#include "threads/intr-stubs.h"
#include "threads/palloc.h"
#include "threads/switch.h"
#include "threads/synch.h"
#include "threads/vaddr.h"
#include "devices/timer.h"
#ifdef USERPROG
#include "userprog/process.h"
#endif
/* Random value for struct thread's `magic' member.
Used to detect stack overflow. See the big comment at the top
of thread.h for details. */
#define THREAD_MAGIC 0xcd6abf4b
/* List of processes in THREAD_READY state, that is, processes
that are ready to run but not actually running. */
static struct list ready_list;
/* List of all processes. Processes are added to this list
when they are first scheduled and removed when they exit. */
static struct list all_list;
/* Idle thread. */
static struct thread *idle_thread;
/* Initial thread, the thread running init.c:main(). */
static struct thread *initial_thread;
/* Lock used by allocate_tid(). */
static struct lock tid_lock;
/* List of threads blocked by thread_sleep(), kept ordered by
   ascending wakeup tick. */
static struct list sleeping;
/* System load average as a fixed-point value, recomputed once per
   second by thread_tick() under the mlfqs scheduler. */
static FPReal load_avg;
/* Stack frame for kernel_thread(). */
struct kernel_thread_frame
{
void *eip; /* Return address. */
thread_func *function; /* Function to call. */
void *aux; /* Auxiliary data for function. */
};
/* Statistics. */
static long long idle_ticks; /* # of timer ticks spent idle. */
static long long kernel_ticks; /* # of timer ticks in kernel threads. */
static long long user_ticks; /* # of timer ticks in user programs. */
/* Scheduling. */
#define TIME_SLICE 4 /* # of timer ticks to give each thread. */
static unsigned thread_ticks; /* # of timer ticks since last yield. */
/* If false (default), use round-robin scheduler.
If true, use multi-level feedback queue scheduler.
Controlled by kernel command-line option "-o mlfqs". */
bool thread_mlfqs;
static void kernel_thread (thread_func *, void *aux);
static void idle (void *aux UNUSED);
static struct thread *running_thread (void);
static struct thread *next_thread_to_run (void);
static void init_thread (struct thread *, const char *name, int priority);
static bool is_thread (struct thread *) UNUSED;
static void *alloc_frame (struct thread *, size_t size);
static void schedule (void);
void thread_schedule_tail (struct thread *prev);
static tid_t allocate_tid (void);
void thread_yield_for_higher_priority(void);
static void compute_mlfqs_priority(struct thread* t);
bool comp_priority(struct list_elem *, struct list_elem *, void *);
/* Initializes the threading system by transforming the code
that's currently running into a thread. This can't work in
general and it is possible in this case only because loader.S
was careful to put the bottom of the stack at a page boundary.
Also initializes the run queue and the tid lock.
After calling this function, be sure to initialize the page
allocator before trying to create any threads with
thread_create().
It is not safe to call thread_current() until this function
finishes. */
void
thread_init (void)
{
ASSERT (intr_get_level () == INTR_OFF);
lock_init (&tid_lock);
list_init (&ready_list);
list_init (&all_list);
/* Set up a thread structure for the running thread. */
initial_thread = running_thread ();
init_thread (initial_thread, "main", PRI_DEFAULT);
initial_thread->status = THREAD_RUNNING;
initial_thread->tid = allocate_tid ();
list_init (&sleeping);
load_avg = INT_TO_FPR (0);
}
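/* Boot ordering sketch (approximate; the exact call sequence lives
   in init.c:main() and is not part of this file):

     thread_init ();   // turn the running code into the "main" thread
     ...               // memory subsystems, including the page allocator
     thread_start ();  // create "idle" and enable preemption
     ...
     thread_exit ();   // terminate "main"
*/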
/* list_less_func that orders sleeping threads by ascending wakeup
   tick, so the earliest wakeup sits at the front of the list. */
static bool
earlier_wakeup (const struct list_elem *a, const struct list_elem *b,
                void *aux UNUSED)
{
  return list_entry (a, struct thread, elem)->wakeup <
         list_entry (b, struct thread, elem)->wakeup;
}
/* Puts the current thread to sleep for at least TICKS timer ticks:
   records its wakeup time, queues it on the ordered sleeping list,
   and blocks until thread_unsleep() releases it. */
void
thread_sleep (uint64_t ticks)
{
  enum intr_level old_level = intr_disable ();
  struct thread *cur = thread_current ();

  cur->wakeup = timer_ticks () + ticks;
  list_insert_ordered (&sleeping, &cur->elem, earlier_wakeup, NULL);
  thread_block ();
  intr_set_level (old_level);
}
/* Shorthand: maps a list_elem from the sleeping list back to its
   enclosing thread. */
static struct thread *
thr (struct list_elem *l)
{
  return list_entry (l, struct thread, elem);
}
/* Wakes every sleeping thread whose wakeup tick has passed.
   Because the list is ordered by wakeup tick, the scan stops at
   the first thread that is still due to sleep. Intended to be
   called from the timer interrupt handler. */
void
thread_unsleep (void)
{
  const uint64_t ticks = timer_ticks ();
  enum intr_level old_level = intr_disable ();
  struct list_elem *t = list_begin (&sleeping);

  /* The end-of-list check also guards against reading the tail
     sentinel once every sleeper has been woken. */
  while (t != list_end (&sleeping) && thr (t)->wakeup <= ticks)
    {
      struct thread *s = thr (t);
      t = list_remove (t);
      thread_unblock (s);
    }
  intr_set_level (old_level);
}
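/* A sketch of how the sleep machinery above would be wired into
   devices/timer.c (an assumption about that file, not code here):

     void
     timer_sleep (int64_t ticks)
     {
       ASSERT (intr_get_level () == INTR_ON);
       thread_sleep (ticks);    // block until the wakeup tick
     }

     static void
     timer_interrupt (struct intr_frame *args UNUSED)
     {
       ticks++;
       thread_unsleep ();       // release any expired sleepers
       thread_tick ();
     }
*/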
/* Starts preemptive thread scheduling by enabling interrupts.
Also creates the idle thread. */
void
thread_start (void)
{
/* Create the idle thread. */
struct semaphore idle_started;
sema_init (&idle_started, 0);
thread_create ("idle", PRI_MIN, idle, &idle_started);
/* Start preemptive thread scheduling. */
intr_enable ();
/* Wait for the idle thread to initialize idle_thread. */
sema_down (&idle_started);
}
/* Called by the timer interrupt handler at each timer tick.
Thus, this function runs in an external interrupt context. */
void
thread_tick (void)
{
struct thread *t = thread_current ();
/* Update statistics. */
if (t == idle_thread)
idle_ticks++;
#ifdef USERPROG
else if (t->pagedir != NULL)
user_ticks++;
#endif
else
kernel_ticks++;
++thread_ticks;
if (thread_mlfqs) {
  /* Every fourth tick, recompute the running thread's priority
     from its recent_cpu and nice values. */
  if (timer_ticks () % 4 == 0) {
    compute_mlfqs_priority (t);
  }
  /* recent_cpu grows by 1 on every tick charged to a non-idle
     thread. */
  if (t != idle_thread) {
    t->recent_cpu = FPR_ADD_INT (t->recent_cpu, 1);
  }
  /* Once per second, recompute the load average and every
     thread's recent_cpu, per the 4.4BSD scheduler:
       load_avg   = (59/60)*load_avg + (1/60)*ready_threads
       recent_cpu = (2*load_avg)/(2*load_avg + 1)*recent_cpu + nice */
  if (timer_ticks () % TIMER_FREQ == 0) {
    /* Count the running thread as ready unless it is idle. */
    int running = (t != idle_thread
                   && (t->status == THREAD_RUNNING
                       || t->status == THREAD_READY)) ? 1 : 0;
    int thr_running = list_size (&ready_list) + running;

    load_avg = FPR_ADD_FPR (
        FPR_MUL_FPR (INT_DIV_INT (59, 60), load_avg),
        FPR_MUL_INT (INT_DIV_INT (1, 60), thr_running));

    FPReal load_avg_x2 = FPR_MUL_INT (load_avg, 2);
    struct list_elem *e;
    for (e = list_begin (&all_list); e != list_end (&all_list);
         e = list_next (e)) {
      struct thread *i = list_entry (e, struct thread, allelem);
      i->recent_cpu = FPR_ADD_INT (
          FPR_MUL_FPR (i->recent_cpu,
                       FPR_DIV_FPR (load_avg_x2,
                                    FPR_ADD_INT (load_avg_x2, 1))),
          i->nice);
    }
  }
}
/* Enforce preemption. */
if (thread_ticks >= TIME_SLICE)
  intr_yield_on_return ();
}
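/* Worked example of the once-per-second update above, assuming the
   usual 17.14 fixed-point layout for FPReal (1.0 == 1 << 14): with
   load_avg == 1.0 and thr_running == 3,

     load_avg = (59/60)*1.0 + (1/60)*3 = 1.0333...   (~16930 in 17.14)

   and a thread with recent_cpu == 10.0 and nice == 0 then gets

     recent_cpu = (2*1.0333) / (2*1.0333 + 1) * 10.0 =~ 6.74. */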
/* Returns the current thread's nice value. */
int
thread_get_nice (void)
{
  enum intr_level old_level = intr_disable ();
  int r = thread_current ()->nice;
  intr_set_level (old_level);
  return r;
}
/* Sets the current thread's nice value to NICE, clamped to the
   valid range [-20, 20], recomputes its priority, and yields if
   it no longer has the highest priority. */
void
thread_set_nice (int nice)
{
  struct thread *t = thread_current ();
  enum intr_level old_level = intr_disable ();
  if (nice > 20)
    nice = 20;
  else if (nice < -20)
    nice = -20;
  t->nice = nice;
  compute_mlfqs_priority (t);
  intr_set_level (old_level);
  thread_yield_for_higher_priority ();
}
/* Returns 100 times the current thread's recent_cpu value,
   rounded to the nearest integer. */
int
thread_get_recent_cpu (void)
{
  enum intr_level old_level = intr_disable ();
  int r = FPR_TO_INT (FPR_MUL_INT (thread_current ()->recent_cpu, 100));
  intr_set_level (old_level);
  return r;
}
/* Returns 100 times the system load average, rounded to the
   nearest integer. */
int
thread_get_load_avg (void)
{
  enum intr_level old_level = intr_disable ();
  int r = FPR_TO_INT (FPR_MUL_INT (load_avg, 100));
  intr_set_level (old_level);
  return r;
}
/* Prints thread statistics. */
void
thread_print_stats (void)
{
printf ("Thread: %lld idle ticks, %lld kernel ticks, %lld user ticks\n",
idle_ticks, kernel_ticks, user_ticks);
}
/* Creates a new kernel thread named NAME with the given initial
PRIORITY, which executes FUNCTION passing AUX as the argument,
and adds it to the ready queue. Returns the thread identifier
for the new thread, or TID_ERROR if creation fails.
If thread_start() has been called, then the new thread may be
scheduled before thread_create() returns. It could even exit
before thread_create() returns. Contrariwise, the original
thread may run for any amount of time before the new thread is
scheduled. Use a semaphore or some other form of
synchronization if you need to ensure ordering.
The new thread's `priority' member is set to PRIORITY; the
highest-priority ready thread is selected by next_thread_to_run().
Under "-o mlfqs", PRIORITY is instead reinterpreted as the initial
nice value (see init_thread()). */
tid_t
thread_create (const char *name, int priority,
thread_func *function, void *aux)
{
struct thread *t;
struct kernel_thread_frame *kf;
struct switch_entry_frame *ef;
struct switch_threads_frame *sf;
tid_t tid;
enum intr_level old_level;
ASSERT (function != NULL);
/* Allocate thread. */
t = palloc_get_page (PAL_ZERO);
if (t == NULL)
return TID_ERROR;
/* Initialize thread. */
init_thread (t, name, priority);
tid = t->tid = allocate_tid ();
/* Prepare thread for first run by initializing its stack.
Do this atomically so intermediate values for the 'stack'
member cannot be observed. */
old_level = intr_disable ();
/* Stack frame for kernel_thread(). */
kf = alloc_frame (t, sizeof *kf);
kf->eip = NULL;
kf->function = function;
kf->aux = aux;
/* Stack frame for switch_entry(). */
ef = alloc_frame (t, sizeof *ef);
ef->eip = (void (*) (void)) kernel_thread;
/* Stack frame for switch_threads(). */
sf = alloc_frame (t, sizeof *sf);
sf->eip = switch_entry;
sf->ebp = 0;
intr_set_level (old_level);
/* Register T as a child of the creating thread so the parent can
   find it later via thread_get_by_tid(). */
struct thread *cur = thread_current ();
ASSERT (cur != NULL);
list_push_back (&cur->children, &t->child_elem);
t->parent = cur;
/* Add to run queue. */
thread_unblock (t);
/* Yield immediately if the new thread should preempt us. */
thread_yield_for_higher_priority ();
return tid;
}
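/* A minimal usage sketch for the ordering caveat above (hypothetical
   caller, not part of this file): to guarantee the child has run
   before the parent continues, pass a semaphore as AUX, mirroring
   the idle-thread handshake in thread_start():

     static void child (void *aux) { sema_up (aux); }

     void parent (void)
     {
       struct semaphore done;
       sema_init (&done, 0);
       if (thread_create ("child", PRI_DEFAULT, child, &done) != TID_ERROR)
         sema_down (&done);   // blocks until the child has started
     }
*/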
/* Puts the current thread to sleep. It will not be scheduled
again until awoken by thread_unblock().
This function must be called with interrupts turned off. It
is usually a better idea to use one of the synchronization
primitives in synch.h. */
void
thread_block (void)
{
ASSERT (!intr_context ());
ASSERT (intr_get_level () == INTR_OFF);
thread_current ()->status = THREAD_BLOCKED;
schedule ();
}
/* Transitions a blocked thread T to the ready-to-run state.
This is an error if T is not blocked. (Use thread_yield() to
make the running thread ready.)
This function does not preempt the running thread. This can
be important: if the caller had disabled interrupts itself,
it may expect that it can atomically unblock a thread and
update other data. */
void
thread_unblock (struct thread *t)
{
enum intr_level old_level;
ASSERT (is_thread (t));
old_level = intr_disable ();
ASSERT (t->status == THREAD_BLOCKED);
list_push_back (&ready_list, &t->elem);
t->status = THREAD_READY;
intr_set_level (old_level);
}
/* Returns the name of the running thread. */
const char *
thread_name (void)
{
return thread_current ()->name;
}
/* Returns the running thread.
This is running_thread() plus a couple of sanity checks.
See the big comment at the top of thread.h for details. */
struct thread *
thread_current (void)
{
struct thread *t = running_thread ();
/* Make sure T is really a thread.
If either of these assertions fire, then your thread may
have overflowed its stack. Each thread has less than 4 kB
of stack, so a few big automatic arrays or moderate
recursion can cause stack overflow. */
ASSERT (is_thread (t));
ASSERT (t->status == THREAD_RUNNING);
return t;
}
/* Returns the running thread's tid. */
tid_t
thread_tid (void)
{
return thread_current ()->tid;
}
/* Deschedules the current thread and destroys it. Never
returns to the caller. */
void
thread_exit (void)
{
ASSERT (!intr_context ());
#ifdef USERPROG
process_exit ();
#endif
/* Remove thread from all threads list, set our status to dying,
and schedule another process. That process will destroy us
when it calls thread_schedule_tail(). */
intr_disable ();
struct thread *t = thread_current ();
/* Announce the exit status, as the userprog tests expect. */
printf ("%s: exit(%d)\n", t->name, t->exit_status);
list_remove (&t->allelem);
/* Wake a parent blocked waiting on us, then wait until the parent
   has collected our exit status before dying. */
sema_up (&t->waiting_sem);
sema_down (&t->parent_sem);
t->status = THREAD_DYING;
list_remove (&t->child_elem);
schedule ();
NOT_REACHED ();
}
/* Yields the CPU. The current thread is not put to sleep and
may be scheduled again immediately at the scheduler's whim. */
void
thread_yield (void)
{
struct thread *cur = thread_current ();
enum intr_level old_level;
ASSERT (!intr_context ());
old_level = intr_disable ();
if (cur != idle_thread)
list_push_back (&ready_list, &cur->elem);
cur->status = THREAD_READY;
schedule ();
intr_set_level (old_level);
}
/* Invoke function 'func' on all threads, passing along 'aux'.
This function must be called with interrupts off. */
void
thread_foreach (thread_action_func *func, void *aux)
{
struct list_elem *e;
ASSERT (intr_get_level () == INTR_OFF);
for (e = list_begin (&all_list); e != list_end (&all_list);
e = list_next (e))
{
struct thread *t = list_entry (e, struct thread, allelem);
func (t, aux);
}
}
/* Returns the current thread's priority. */
int
thread_get_priority (void)
{
  return thread_current ()->priority;
}
/* Sets the current thread's priority to NEW_PRIORITY. */
void
thread_set_priority (int new_priority)
{
thread_current ()->priority = new_priority;
thread_yield_for_higher_priority();
}
/* Idle thread. Executes when no other thread is ready to run.
The idle thread is initially put on the ready list by
thread_start(). It will be scheduled once initially, at which
point it initializes idle_thread, "up"s the semaphore passed
to it to enable thread_start() to continue, and immediately
blocks. After that, the idle thread never appears in the
ready list. It is returned by next_thread_to_run() as a
special case when the ready list is empty. */
static void
idle (void *idle_started_ UNUSED)
{
struct semaphore *idle_started = idle_started_;
idle_thread = thread_current ();
sema_up (idle_started);
for (;;)
{
/* Let someone else run. */
intr_disable ();
thread_block ();
/* Re-enable interrupts and wait for the next one.
The `sti' instruction disables interrupts until the
completion of the next instruction, so these two
instructions are executed atomically. This atomicity is
important; otherwise, an interrupt could be handled
between re-enabling interrupts and waiting for the next
one to occur, wasting as much as one clock tick worth of
time.
See [IA32-v2a] "HLT", [IA32-v2b] "STI", and [IA32-v3a]
7.11.1 "HLT Instruction". */
asm volatile ("sti; hlt" : : : "memory");
}
}
/* Function used as the basis for a kernel thread. */
static void
kernel_thread (thread_func *function, void *aux)
{
ASSERT (function != NULL);
intr_enable (); /* The scheduler runs with interrupts off. */
function (aux); /* Execute the thread function. */
thread_exit (); /* If function() returns, kill the thread. */
}
/* Returns the running thread. */
struct thread *
running_thread (void)
{
uint32_t *esp;
/* Copy the CPU's stack pointer into `esp', and then round that
down to the start of a page. Because `struct thread' is
always at the beginning of a page and the stack pointer is
somewhere in the middle, this locates the current thread. */
asm ("mov %%esp, %0" : "=g" (esp));
return pg_round_down (esp);
}
/* Returns true if T appears to point to a valid thread. */
static bool
is_thread (struct thread *t)
{
return t != NULL && t->magic == THREAD_MAGIC;
}
/* Recomputes T's priority under the 4.4BSD scheduler:
     priority = PRI_MAX - (recent_cpu / 4) - (nice * 2),
   clamped to [PRI_MIN, PRI_MAX]. */
static void
compute_mlfqs_priority (struct thread *t)
{
  t->priority = PRI_MAX - FPR_TO_INT (FPR_DIV_INT (t->recent_cpu, 4))
                - (t->nice * 2);
  if (t->priority < PRI_MIN)
    t->priority = PRI_MIN;
  else if (t->priority > PRI_MAX)
    t->priority = PRI_MAX;
}
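/* Example: with PRI_MAX == 63, a thread with recent_cpu == 20.0 and
   nice == 2 gets priority 63 - 20/4 - 2*2 = 54; the clamping above
   only matters when recent_cpu or nice push the result outside
   [PRI_MIN, PRI_MAX]. */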
/* Does basic initialization of T as a blocked thread named
NAME. */
static void
init_thread (struct thread *t, const char *name, int priority)
{
ASSERT (t != NULL);
ASSERT (PRI_MIN <= priority && priority <= PRI_MAX);
ASSERT (name != NULL);
memset (t, 0, sizeof *t);
t->status = THREAD_BLOCKED;
strlcpy (t->name, name, sizeof t->name);
t->stack = (uint8_t *) t + PGSIZE;
sema_init (&t->waiting_sem, 0);
sema_init (&t->parent_sem, 0);
t->waited_on_before = 0;
t->exit_status = -1;
list_init (&t->children);
if (thread_mlfqs) {
  /* Under "-o mlfqs" the priority argument is reused as the
     initial nice value; out-of-range values fall back to 0. */
  t->nice_init = priority > 20 || priority < -20 ? 0 : priority;
  t->nice = t->nice_init;
  t->recent_cpu = 0;
  compute_mlfqs_priority (t);
} else {
  t->priority = priority;
}
t->magic = THREAD_MAGIC;
list_push_back (&all_list, &t->allelem);
}
/* Allocates a SIZE-byte frame at the top of thread T's stack and
returns a pointer to the frame's base. */
static void *
alloc_frame (struct thread *t, size_t size)
{
/* Stack data is always allocated in word-size units. */
ASSERT (is_thread (t));
ASSERT (size % sizeof (uint32_t) == 0);
t->stack -= size;
return t->stack;
}
/* Chooses and returns the next thread to be scheduled. Should
return a thread from the run queue, unless the run queue is
empty. (If the running thread can continue running, then it
will be in the run queue.) If the run queue is empty, return
idle_thread. */
static struct thread *
next_thread_to_run (void)
{
if (list_empty (&ready_list))
return idle_thread;
else {
  /* Pick the highest-priority ready thread; list_max() with
     comp_priority() finds it in a single O(n) pass. */
  struct list_elem *th_max_elem = list_max (&ready_list,
                                            comp_priority, NULL);
  struct thread *th_max = list_entry (th_max_elem, struct thread, elem);
  list_remove (th_max_elem);
  return th_max;
}
}
/* list_less_func comparing two threads by priority; used with
   list_max() to find the highest-priority ready thread. */
bool
comp_priority (struct list_elem *a, struct list_elem *b, void *aux UNUSED)
{
  struct thread *at = list_entry (a, struct thread, elem);
  struct thread *bt = list_entry (b, struct thread, elem);
  return at->priority < bt->priority;
}
/* Yields the CPU if some ready thread has a strictly higher
   priority than the running thread; otherwise does nothing. */
void
thread_yield_for_higher_priority (void)
{
  enum intr_level old_level = intr_disable ();
  if (!list_empty (&ready_list)) {
    struct list_elem *th_max_elem = list_max (&ready_list,
                                              comp_priority, NULL);
    struct thread *th_max = list_entry (th_max_elem, struct thread, elem);
    if (thread_current ()->priority < th_max->priority)
      thread_yield ();
  }
  intr_set_level (old_level);
}
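/* Design note: this check is the scheduler's preemption point
   within this file. It runs after every event that can raise
   another thread's priority above the running thread's:
   thread_create(), thread_set_priority(), and thread_set_nice(). */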
/* Completes a thread switch by activating the new thread's page
tables, and, if the previous thread is dying, destroying it.
At this function's invocation, we just switched from thread
PREV, the new thread is already running, and interrupts are
still disabled. This function is normally invoked by
thread_schedule() as its final action before returning, but
the first time a thread is scheduled it is called by
switch_entry() (see switch.S).
It's not safe to call printf() until the thread switch is
complete. In practice that means that printf()s should be
added at the end of the function.
After this function and its caller returns, the thread switch
is complete. */
void
thread_schedule_tail (struct thread *prev)
{
struct thread *cur = running_thread ();
ASSERT (intr_get_level () == INTR_OFF);
/* Mark us as running. */
cur->status = THREAD_RUNNING;
/* Start new time slice. */
thread_ticks = 0;
#ifdef USERPROG
/* Activate the new address space. */
process_activate ();
#endif
/* If the thread we switched from is dying, destroy its struct
thread. This must happen late so that thread_exit() doesn't
pull out the rug under itself. (We don't free
initial_thread because its memory was not obtained via
palloc().) */
if (prev != NULL && prev->status == THREAD_DYING && prev != initial_thread)
{
ASSERT (prev != cur);
palloc_free_page (prev);
}
}
/* Schedules a new process. At entry, interrupts must be off and
the running process's state must have been changed from
running to some other state. This function finds another
thread to run and switches to it.
It's not safe to call printf() until thread_schedule_tail()
has completed. */
static void
schedule (void)
{
struct thread *cur = running_thread ();
struct thread *next = next_thread_to_run ();
struct thread *prev = NULL;
ASSERT (intr_get_level () == INTR_OFF);
ASSERT (cur->status != THREAD_RUNNING);
ASSERT (is_thread (next));
if (cur != next)
prev = switch_threads (cur, next);
thread_schedule_tail (prev);
}
/* Searches the current thread's children for the thread with
   identifier TID. Returns it, or NULL if no such child exists. */
struct thread *
thread_get_by_tid (tid_t tid)
{
  struct list *children = &thread_current ()->children;
  struct list_elem *e;

  for (e = list_begin (children); e != list_end (children);
       e = list_next (e))
    {
      struct thread *t = list_entry (e, struct thread, child_elem);
      if (t->tid == tid)
        return t;
    }
  return NULL;
}
/* Returns a tid to use for a new thread. */
static tid_t
allocate_tid (void)
{
static tid_t next_tid = 1;
tid_t tid;
lock_acquire (&tid_lock);
tid = next_tid++;
lock_release (&tid_lock);
return tid;
}
/* Offset of `stack' member within `struct thread'.
Used by switch.S, which can't figure it out on its own. */
uint32_t thread_stack_ofs = offsetof (struct thread, stack);