diff --git a/pintos-env/pintos/devices/timer.c b/pintos-env/pintos/devices/timer.c
index befaaae..3ca85a0 100755
--- a/pintos-env/pintos/devices/timer.c
+++ b/pintos-env/pintos/devices/timer.c
@@ -1,3 +1,6 @@
+// Author: Claudio Maggioni
+// Author: Tommaso Rodolfo Masera
+
 #include "devices/timer.h"
 #include <debug.h>
 #include <inttypes.h>
@@ -7,7 +10,9 @@
 #include "threads/interrupt.h"
 #include "threads/synch.h"
 #include "threads/thread.h"
-  
+#include "threads/malloc.h"
+#include "../lib/kernel/list.h"
+
 /* See [8254] for hardware details of the 8254 timer chip. */
 
 #if TIMER_FREQ < 19
@@ -33,7 +38,7 @@ static void real_time_delay (int64_t num, int32_t denom);
 /* Sets up the timer to interrupt TIMER_FREQ times per second,
    and registers the corresponding interrupt. */
 void
-timer_init (void) 
+timer_init (void)
 {
   pit_configure_channel (0, 2, TIMER_FREQ);
   intr_register_ext (0x20, timer_interrupt, "8254 Timer");
@@ -41,7 +46,7 @@ timer_init (void)
 
 /* Calibrates loops_per_tick, used to implement brief delays. */
 void
-timer_calibrate (void) 
+timer_calibrate (void)
 {
   unsigned high_bit, test_bit;
 
@@ -51,7 +56,7 @@ timer_calibrate (void)
   /* Approximate loops_per_tick as the largest power-of-two
      still less than one timer tick. */
   loops_per_tick = 1u << 10;
-  while (!too_many_loops (loops_per_tick << 1)) 
+  while (!too_many_loops (loops_per_tick << 1))
     {
       loops_per_tick <<= 1;
       ASSERT (loops_per_tick != 0);
@@ -68,7 +73,7 @@ timer_calibrate (void)
 
 /* Returns the number of timer ticks since the OS booted. */
 int64_t
-timer_ticks (void) 
+timer_ticks (void)
 {
   enum intr_level old_level = intr_disable ();
   int64_t t = ticks;
@@ -79,7 +84,7 @@ timer_ticks (void)
 /* Returns the number of timer ticks elapsed since THEN, which
    should be a value once returned by timer_ticks(). */
 int64_t
-timer_elapsed (int64_t then) 
+timer_elapsed (int64_t then)
 {
   return timer_ticks () - then;
 }
@@ -87,19 +92,20 @@ timer_elapsed (int64_t then)
 /* Sleeps for approximately TICKS timer ticks.  Interrupts must
    be turned on. */
 void
-timer_sleep (int64_t ticks) 
+timer_sleep (int64_t ticks)
 {
-  int64_t start = timer_ticks ();
+  if (ticks <= 0)
+    return;
 
-  ASSERT (intr_get_level () == INTR_ON);
-  while (timer_elapsed (start) < ticks) 
-    thread_yield ();
+  ASSERT (intr_get_level() == INTR_ON);
+
+  thread_sleep(ticks);
 }
 
 /* Sleeps for approximately MS milliseconds.  Interrupts must be
    turned on. */
 void
-timer_msleep (int64_t ms) 
+timer_msleep (int64_t ms)
 {
   real_time_sleep (ms, 1000);
 }
@@ -107,7 +113,7 @@ timer_msleep (int64_t ms)
 /* Sleeps for approximately US microseconds.  Interrupts must be
    turned on. */
 void
-timer_usleep (int64_t us) 
+timer_usleep (int64_t us)
 {
   real_time_sleep (us, 1000 * 1000);
 }
@@ -115,7 +121,7 @@ timer_usleep (int64_t us)
 /* Sleeps for approximately NS nanoseconds.  Interrupts must be
    turned on. */
 void
-timer_nsleep (int64_t ns) 
+timer_nsleep (int64_t ns)
 {
   real_time_sleep (ns, 1000 * 1000 * 1000);
 }
@@ -128,7 +134,7 @@ timer_nsleep (int64_t ns)
    will cause timer ticks to be lost.  Thus, use timer_msleep()
    instead if interrupts are enabled. */
 void
-timer_mdelay (int64_t ms) 
+timer_mdelay (int64_t ms)
 {
   real_time_delay (ms, 1000);
 }
@@ -141,7 +147,7 @@ timer_mdelay (int64_t ms)
    will cause timer ticks to be lost.  Thus, use timer_usleep()
    instead if interrupts are enabled. */
 void
-timer_udelay (int64_t us) 
+timer_udelay (int64_t us)
 {
   real_time_delay (us, 1000 * 1000);
 }
@@ -154,14 +160,14 @@ timer_udelay (int64_t us)
    will cause timer ticks to be lost.  Thus, use timer_nsleep()
    instead if interrupts are enabled.*/
 void
-timer_ndelay (int64_t ns) 
+timer_ndelay (int64_t ns)
 {
   real_time_delay (ns, 1000 * 1000 * 1000);
 }
 
 /* Prints timer statistics. */
 void
-timer_print_stats (void) 
+timer_print_stats (void)
 {
   printf ("Timer: %"PRId64" ticks\n", timer_ticks ());
 }
@@ -171,13 +177,15 @@ static void
 timer_interrupt (struct intr_frame *args UNUSED)
 {
   ticks++;
-  thread_tick ();
+
+  thread_unsleep();
+  thread_tick();
 }
 
 /* Returns true if LOOPS iterations waits for more than one timer
    tick, otherwise false. */
 static bool
-too_many_loops (unsigned loops) 
+too_many_loops (unsigned loops)
 {
   /* Wait for a timer tick. */
   int64_t start = ticks;
@@ -201,7 +209,7 @@ too_many_loops (unsigned loops)
    differently in different places the results would be difficult
    to predict. */
 static void NO_INLINE
-busy_wait (int64_t loops) 
+busy_wait (int64_t loops)
 {
   while (loops-- > 0)
     barrier ();
@@ -209,12 +217,12 @@ busy_wait (int64_t loops)
 
 /* Sleep for approximately NUM/DENOM seconds. */
 static void
-real_time_sleep (int64_t num, int32_t denom) 
+real_time_sleep (int64_t num, int32_t denom)
 {
   /* Convert NUM/DENOM seconds into timer ticks, rounding down.
-          
-        (NUM / DENOM) s          
-     ---------------------- = NUM * TIMER_FREQ / DENOM ticks. 
+
+        (NUM / DENOM) s
+     ---------------------- = NUM * TIMER_FREQ / DENOM ticks.
      1 s / TIMER_FREQ ticks
   */
   int64_t ticks = num * TIMER_FREQ / denom;
@@ -224,14 +232,14 @@ real_time_sleep (int64_t num, int32_t denom)
     {
       /* We're waiting for at least one full timer tick.  Use
          timer_sleep() because it will yield the CPU to other
-         processes. */                
-      timer_sleep (ticks); 
+         processes. */
+      timer_sleep (ticks);
     }
-  else 
+  else
     {
       /* Otherwise, use a busy-wait loop for more accurate
          sub-tick timing. */
-      real_time_delay (num, denom); 
+      real_time_delay (num, denom);
     }
 }
@@ -242,5 +250,5 @@ real_time_delay (int64_t num, int32_t denom)
   /* Scale the numerator and denominator down by 1000 to avoid
      the possibility of overflow. */
   ASSERT (denom % 1000 == 0);
-  busy_wait (loops_per_tick * num / 1000 * TIMER_FREQ / (denom / 1000)); 
+  busy_wait (loops_per_tick * num / 1000 * TIMER_FREQ / (denom / 1000));
 }
diff --git a/pintos-env/pintos/threads/thread.c b/pintos-env/pintos/threads/thread.c
index 955ccdc..4629e8c 100755
--- a/pintos-env/pintos/threads/thread.c
+++ b/pintos-env/pintos/threads/thread.c
@@ -1,3 +1,6 @@
+// Author: Claudio Maggioni
+// Author: Tommaso Rodolfo Masera
+
 #include "threads/thread.h"
 #include <debug.h>
 #include <stddef.h>
@@ -11,6 +14,7 @@
 #include "threads/switch.h"
 #include "threads/synch.h"
 #include "threads/vaddr.h"
+#include "devices/timer.h"
 #ifdef USERPROG
 #include "userprog/process.h"
 #endif
@@ -37,8 +41,10 @@ static struct thread *initial_thread;
 /* Lock used by allocate_tid(). */
 static struct lock tid_lock;
 
+static struct list sleeping;
+
 /* Stack frame for kernel_thread(). */
-struct kernel_thread_frame 
+struct kernel_thread_frame
   {
     void *eip;                  /* Return address. */
     thread_func *function;      /* Function to call. */
@@ -85,7 +91,7 @@ static tid_t allocate_tid (void);
    It is not safe to call thread_current() until this function
    finishes. */
 void
-thread_init (void) 
+thread_init (void)
 {
   ASSERT (intr_get_level () == INTR_OFF);
 
@@ -98,12 +104,52 @@ thread_init (void)
   init_thread (initial_thread, "main", PRI_DEFAULT);
   initial_thread->status = THREAD_RUNNING;
   initial_thread->tid = allocate_tid ();
+
+  list_init(&sleeping);
+}
+
+static bool
+earlier_wakeup(const struct list_elem* a, const struct list_elem* b, void* aux UNUSED) {
+  return list_entry(a, struct thread, elem)->wakeup <
+         list_entry(b, struct thread, elem)->wakeup;
+}
+
+void
+thread_sleep(uint64_t ticks) {
+  enum intr_level old_level = intr_disable();
+  struct thread* cur = thread_current();
+  cur->wakeup = timer_ticks() + ticks;
+  // printf("sleeping insert %X\n", cur);
+  list_insert_ordered(&sleeping, &(cur->elem), earlier_wakeup, NULL);
+  thread_block();
+  intr_set_level(old_level);
+}
+
+static struct thread* thr(struct list_elem* l) {
+  return list_entry(l, struct thread, elem);
+}
+
+void
+thread_unsleep() {
+  const uint64_t ticks = timer_ticks();
+  enum intr_level old_level = intr_disable();
+  if (!list_empty(&sleeping)) {
+
+    struct list_elem* t = list_begin(&sleeping);
+    while (t != list_end(&sleeping) && thr(t)->wakeup <= ticks) {
+      // printf("sleeping remove %X\n", thr(t));
+      struct thread* s = thr(t);
+      t = list_remove(t);
+      thread_unblock(s);
+    }
+  }
+  intr_set_level(old_level);
 }
 
 /* Starts preemptive thread scheduling by enabling interrupts.
    Also creates the idle thread. */
 void
-thread_start (void) 
+thread_start (void)
 {
   /* Create the idle thread. */
   struct semaphore idle_started;
@@ -120,7 +166,7 @@ thread_start (void)
 /* Called by the timer interrupt handler at each timer tick.
    Thus, this function runs in an external interrupt context. */
 void
-thread_tick (void) 
+thread_tick (void)
 {
   struct thread *t = thread_current ();
 
@@ -141,7 +187,7 @@ thread_tick (void)
 
 /* Prints thread statistics. */
 void
-thread_print_stats (void) 
+thread_print_stats (void)
 {
   printf ("Thread: %lld idle ticks, %lld kernel ticks, %lld user ticks\n",
           idle_ticks, kernel_ticks, user_ticks);
@@ -164,7 +210,7 @@ thread_print_stats (void)
    Priority scheduling is the goal of Problem 1-3. */
 tid_t
 thread_create (const char *name, int priority,
-               thread_func *function, void *aux) 
+               thread_func *function, void *aux)
 {
   struct thread *t;
   struct kernel_thread_frame *kf;
@@ -185,7 +231,7 @@ thread_create (const char *name, int priority,
   tid = t->tid = allocate_tid ();
 
   /* Prepare thread for first run by initializing its stack.
-     Do this atomically so intermediate values for the 'stack' 
+     Do this atomically so intermediate values for the 'stack'
      member cannot be observed. */
   old_level = intr_disable ();
 
@@ -219,7 +265,7 @@ thread_create (const char *name, int priority,
    is usually a better idea to use one of the synchronization
    primitives in synch.h. */
 void
-thread_block (void) 
+thread_block (void)
 {
   ASSERT (!intr_context ());
   ASSERT (intr_get_level () == INTR_OFF);
@@ -237,7 +283,7 @@ thread_block (void)
    it may expect that it can atomically unblock a thread and
    update other data. */
 void
-thread_unblock (struct thread *t) 
+thread_unblock (struct thread *t)
 {
   enum intr_level old_level;
 
@@ -252,7 +298,7 @@ thread_unblock (struct thread *t)
 
 /* Returns the name of the running thread. */
 const char *
-thread_name (void) 
+thread_name (void)
 {
   return thread_current ()->name;
 }
@@ -261,10 +307,10 @@ thread_name (void)
    This is running_thread() plus a couple of sanity checks.
    See the big comment at the top of thread.h for details. */
 struct thread *
-thread_current (void) 
+thread_current (void)
 {
   struct thread *t = running_thread ();
-  
+
   /* Make sure T is really a thread.
      If either of these assertions fire, then your thread may
      have overflowed its stack.  Each thread has less than 4 kB
@@ -278,7 +324,7 @@ thread_current (void)
 
 /* Returns the running thread's tid. */
 tid_t
-thread_tid (void) 
+thread_tid (void)
 {
   return thread_current ()->tid;
 }
@@ -286,7 +332,7 @@ thread_tid (void)
 /* Deschedules the current thread and destroys it.  Never
    returns to the caller. */
 void
-thread_exit (void) 
+thread_exit (void)
 {
   ASSERT (!intr_context ());
 
@@ -307,15 +353,15 @@ thread_exit (void)
 /* Yields the CPU.  The current thread is not put to sleep and
    may be scheduled again immediately at the scheduler's whim. */
 void
-thread_yield (void) 
+thread_yield (void)
 {
   struct thread *cur = thread_current ();
   enum intr_level old_level;
-  
+
   ASSERT (!intr_context ());
 
   old_level = intr_disable ();
-  if (cur != idle_thread) 
+  if (cur != idle_thread)
     list_push_back (&ready_list, &cur->elem);
   cur->status = THREAD_READY;
   schedule ();
@@ -341,28 +387,28 @@ thread_foreach (thread_action_func *func, void *aux)
 
 /* Sets the current thread's priority to NEW_PRIORITY. */
 void
-thread_set_priority (int new_priority) 
+thread_set_priority (int new_priority)
 {
   thread_current ()->priority = new_priority;
 }
 
 /* Returns the current thread's priority. */
 int
-thread_get_priority (void) 
+thread_get_priority (void)
 {
   return thread_current ()->priority;
 }
 
 /* Sets the current thread's nice value to NICE. */
 void
-thread_set_nice (int nice UNUSED) 
+thread_set_nice (int nice UNUSED)
 {
   /* Not yet implemented. */
 }
 
 /* Returns the current thread's nice value. */
 int
-thread_get_nice (void) 
+thread_get_nice (void)
 {
   /* Not yet implemented. */
   return 0;
@@ -370,7 +416,7 @@ thread_get_nice (void)
 
 /* Returns 100 times the system load average. */
 int
-thread_get_load_avg (void) 
+thread_get_load_avg (void)
 {
   /* Not yet implemented. */
   return 0;
@@ -378,7 +424,7 @@ thread_get_load_avg (void)
 
 /* Returns 100 times the current thread's recent_cpu value. */
 int
-thread_get_recent_cpu (void) 
+thread_get_recent_cpu (void)
 {
   /* Not yet implemented. */
   return 0;
@@ -394,13 +440,13 @@ thread_get_recent_cpu (void)
    ready list.  It is returned by next_thread_to_run() as a
    special case when the ready list is empty. */
 static void
-idle (void *idle_started_ UNUSED) 
+idle (void *idle_started_ UNUSED)
 {
   struct semaphore *idle_started = idle_started_;
   idle_thread = thread_current ();
   sema_up (idle_started);
 
-  for (;;) 
+  for (;;)
     {
       /* Let someone else run. */
       intr_disable ();
@@ -424,7 +470,7 @@ idle (void *idle_started_ UNUSED)
 
 /* Function used as the basis for a kernel thread. */
 static void
-kernel_thread (thread_func *function, void *aux) 
+kernel_thread (thread_func *function, void *aux)
 {
   ASSERT (function != NULL);
 
@@ -435,7 +481,7 @@ kernel_thread (thread_func *function, void *aux)
 
 /* Returns the running thread. */
 struct thread *
-running_thread (void) 
+running_thread (void)
 {
   uint32_t *esp;
 
@@ -475,7 +521,7 @@ init_thread (struct thread *t, const char *name, int priority)
 /* Allocates a SIZE-byte frame at the top of thread T's stack and
    returns a pointer to the frame's base. */
 static void *
-alloc_frame (struct thread *t, size_t size) 
+alloc_frame (struct thread *t, size_t size)
 {
   /* Stack data is always allocated in word-size units. */
   ASSERT (is_thread (t));
@@ -491,7 +537,7 @@ alloc_frame (struct thread *t, size_t size)
    will be in the run queue.)  If the run queue is empty, return
    idle_thread. */
 static struct thread *
-next_thread_to_run (void) 
+next_thread_to_run (void)
 {
   if (list_empty (&ready_list))
     return idle_thread;
@@ -519,7 +565,7 @@ void
 thread_schedule_tail (struct thread *prev)
 {
   struct thread *cur = running_thread ();
-  
+
   ASSERT (intr_get_level () == INTR_OFF);
 
   /* Mark us as running. */
@@ -538,7 +584,7 @@ thread_schedule_tail (struct thread *prev)
      pull out the rug under itself.  (We don't free
      initial_thread because its memory was not obtained via
      palloc().) */
-  if (prev != NULL && prev->status == THREAD_DYING && prev != initial_thread) 
+  if (prev != NULL && prev->status == THREAD_DYING && prev != initial_thread)
     {
       ASSERT (prev != cur);
       palloc_free_page (prev);
@@ -553,7 +599,7 @@ thread_schedule_tail (struct thread *prev)
    It's not safe to call printf() until thread_schedule_tail()
    has completed. */
 static void
-schedule (void) 
+schedule (void)
 {
   struct thread *cur = running_thread ();
   struct thread *next = next_thread_to_run ();
@@ -570,7 +616,7 @@ schedule (void)
 
 /* Returns a tid to use for a new thread. */
 static tid_t
-allocate_tid (void) 
+allocate_tid (void)
 {
   static tid_t next_tid = 1;
   tid_t tid;
diff --git a/pintos-env/pintos/threads/thread.h b/pintos-env/pintos/threads/thread.h
index 7965c06..10d3206 100755
--- a/pintos-env/pintos/threads/thread.h
+++ b/pintos-env/pintos/threads/thread.h
@@ -1,3 +1,6 @@
+// Author: Claudio Maggioni
+// Author: Tommaso Rodolfo Masera
+
 #ifndef THREADS_THREAD_H
 #define THREADS_THREAD_H
 
@@ -89,7 +92,7 @@ struct thread
     uint8_t *stack;                     /* Saved stack pointer. */
     int priority;                       /* Priority. */
     struct list_elem allelem;           /* List element for all threads list. */
-    
+    uint64_t wakeup;
     /* Shared between thread.c and synch.c. */
     struct list_elem elem;              /* List element. */
 
@@ -126,6 +129,9 @@ const char *thread_name (void);
 void thread_exit (void) NO_RETURN;
 void thread_yield (void);
 
+void thread_sleep(uint64_t ticks);
+void thread_unsleep(void);
+
 /* Performs some operation on thread t, given auxiliary data AUX. */
 typedef void thread_action_func (struct thread *t, void *aux);
 void thread_foreach (thread_action_func *, void *);
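
Note on the mechanism (this note and the sketch below are not part of the patch): timer_sleep() now records an absolute wakeup tick for the caller and blocks it via thread_sleep(), while the timer interrupt calls thread_unsleep() to unblock every sleeper whose deadline has passed. Because the sleeping list is kept ordered by wakeup tick (earlier_wakeup), the wake-up scan can stop at the first thread that is still due in the future. The following user-space sketch mimics that ordered-wakeup scan outside Pintos; every name in it (struct sleeper, sleep_until, wake_due, sleep_list) is hypothetical and only illustrates the invariant, it is not kernel code.

/* Standalone illustration (not Pintos code): sleepers are kept in a list
   sorted by absolute wakeup tick; wake_due() pops every node whose deadline
   has passed, the same way thread_unsleep() walks the `sleeping` list. */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct sleeper                       /* hypothetical stand-in for struct thread */
  {
    int64_t wakeup;                  /* absolute tick at which to wake up */
    struct sleeper *next;
  };

static struct sleeper *sleep_list;   /* kept sorted by ascending wakeup tick */

/* Inserts S so the list stays sorted, mirroring
   list_insert_ordered (&sleeping, ..., earlier_wakeup, NULL). */
static void
sleep_until (struct sleeper *s, int64_t wakeup)
{
  struct sleeper **p = &sleep_list;
  s->wakeup = wakeup;
  while (*p != NULL && (*p)->wakeup < wakeup)
    p = &(*p)->next;
  s->next = *p;
  *p = s;
}

/* Wakes every sleeper whose deadline is <= NOW, mirroring thread_unsleep():
   because the list is sorted, the scan stops at the first future deadline. */
static void
wake_due (int64_t now)
{
  while (sleep_list != NULL && sleep_list->wakeup <= now)
    {
      struct sleeper *s = sleep_list;
      sleep_list = s->next;
      printf ("tick %lld: waking sleeper due at tick %lld\n",
              (long long) now, (long long) s->wakeup);
      free (s);
    }
}

int
main (void)
{
  int64_t deadlines[] = { 30, 10, 20 };
  for (int i = 0; i < 3; i++)
    {
      struct sleeper *s = malloc (sizeof *s);
      if (s != NULL)
        sleep_until (s, deadlines[i]);
    }
  for (int64_t now = 0; now <= 40; now += 10)   /* simulated timer ticks */
    wake_due (now);
  return 0;
}

The sleep_list != NULL test here plays the role that the end-of-list check plays in thread_unsleep(): since nodes are removed inside the loop, the scan must be prepared to run off the end of the list on the very tick that wakes the last sleeper.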