Changeset 151c050 in mainline


Timestamp:
2024-01-15T14:33:03Z
Author:
Jiří Zárevúcky <zarevucky.jiri@…>
Branches:
master
Children:
c7ceacf
Parents:
8996582
git-author:
Jiří Zárevúcky <zarevucky.jiri@…> (2023-04-15 16:15:29)
git-committer:
Jiří Zárevúcky <zarevucky.jiri@…> (2024-01-15 14:33:03)
Message:

Rethink scheduler entry points

Changes the way the scheduler is entered, to eliminate some unnecessary
locking and interrupt disabling.
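
The practical effect shows at every call site: a caller that wanted to give
up the CPU previously had to manage the interrupt level, the thread lock, and
the state transition itself, whereas it now just hands the target state to
scheduler_enter(). A before/after sketch, condensed from the waitq sleep path
in thread.c below:

    /* Before: the caller juggles IPL, THREAD->lock, and the state. */
    ipl_t ipl = interrupts_disable();
    irq_spinlock_lock(&THREAD->lock, false);
    THREAD->state = Sleeping;
    scheduler_locked(ipl);

    /* After: one call; scheduler_enter() disables interrupts, takes
     * THREAD->lock, and sets the state internally. */
    scheduler_enter(Sleeping);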

Location:
kernel/generic
Files:
6 edited

Legend:

      unchanged line
    + added line
    - removed line
  • kernel/generic/include/proc/scheduler.h

    r8996582 → r151c050

     #include <atomic.h>
     #include <adt/list.h>
    +#include <abi/proc/thread.h>

     #define RQ_COUNT          16
    …
     extern void scheduler_fpu_lazy_request(void);
    -extern void scheduler(void);
    -extern void scheduler_locked(ipl_t);
     extern void kcpulb(void *arg);

     extern void sched_print_list(void);
    +
    +extern void scheduler_run(void) __attribute__((noreturn));
    +extern void scheduler_enter(state_t);

     /*
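
    The header now splits the old scheduler() into two entry points with
    narrower contracts: scheduler_run() starts scheduling on a freshly booted
    CPU and never returns, while scheduler_enter(state_t) is called by a
    running thread to give up the CPU, taking the requested state transition
    as an argument instead of leaving it to the caller. A condensed usage
    sketch, with the call sites drawn from the hunks below:

        /* Boot path (main.c): enter the scheduler for good. */
        scheduler_run();
        /* not reached */

        /* A running thread (thread.c): block, exit, or just yield. */
        scheduler_enter(Sleeping);  /* give up the CPU until woken up */
        scheduler_enter(Exiting);   /* give up the CPU permanently */
        thread_yield();             /* wrapper for scheduler_enter(Running) */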
  • kernel/generic/include/proc/thread.h

    r8996582 → r151c050

              */
             context_t saved_context;
    -        ipl_t saved_ipl;

             /**
    …
     extern errno_t thread_join_timeout(thread_t *, uint32_t, unsigned int);

    +extern void thread_yield(void);
    +
     extern void thread_print_list(bool);
     extern thread_t *thread_find_by_id(thread_id_t);
  • kernel/generic/src/main/main.c

    r8996582 → r151c050

             /*
    -         * This call to scheduler() will return to kinit,
    +         * This call to scheduler_run() will return to kinit,
              * starting the thread of kernel threads.
              */
    -        scheduler();
    +        scheduler_run();
             /* not reached */
     }
    …
             semaphore_up(&ap_completion_semaphore);
    -        scheduler();
    +        scheduler_run();
             /* not reached */
     }
  • kernel/generic/src/proc/scheduler.c

    r8996582 → r151c050

     }

    -void scheduler(void)
    -{
    -        ipl_t ipl = interrupts_disable();
    -
    -        if (atomic_load(&haltstate))
    -                halt();
    -
    -        if (THREAD) {
    -                irq_spinlock_lock(&THREAD->lock, false);
    -        }
    -
    -        scheduler_locked(ipl);
    +void scheduler_run(void)
    +{
    +        assert(interrupts_disabled());
    +        assert(THREAD == NULL);
    +        assert(CPU != NULL);
    +
    +        current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
    +
    +        context_t ctx;
    +        context_save(&ctx);
    +        context_set(&ctx, FADDR(scheduler_separated_stack),
    +            (uintptr_t) CPU_LOCAL->stack, STACK_SIZE);
    +        context_restore(&ctx);
    +
    +        unreachable();
     }

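    scheduler_run() is entered once per CPU, with interrupts disabled and no
    current thread, as the asserts document; it copies the CURRENT structure
    onto the CPU's scheduler stack and then jumps onto that stack for good.
    The context_save()/context_set()/context_restore() triple is the kernel's
    idiom for such a one-way switch; a minimal sketch of the pattern, with
    entry_func and new_stack as hypothetical placeholders:

        context_t ctx;
        context_save(&ctx);                      /* capture a valid context */
        context_set(&ctx, FADDR(entry_func),     /* repoint its PC ... */
            (uintptr_t) new_stack, STACK_SIZE);  /* ... and its stack */
        context_restore(&ctx);                   /* jump; never returns */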
     
    …
      *
      */
    -void scheduler_locked(ipl_t ipl)
    -{
    +void scheduler_enter(state_t new_state)
    +{
    +        ipl_t ipl = interrupts_disable();
    +
             assert(CPU != NULL);
    -
    -        if (THREAD) {
    -                /* Update thread kernel accounting */
    -                THREAD->kcycles += get_cycle() - THREAD->last_cycle;
    -
    -                fpu_cleanup();
    -
    -                if (!context_save(&THREAD->saved_context)) {
    -                        /*
    -                         * This is the place where threads leave scheduler();
    -                         */
    -
    -                        irq_spinlock_unlock(&THREAD->lock, false);
    -                        interrupts_restore(THREAD->saved_ipl);
    -
    -                        return;
    -                }
    -
    -                /*
    -                 * Interrupt priority level of preempted thread is recorded
    -                 * here to facilitate scheduler() invocations from
    -                 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
    -                 *
    -                 */
    -                THREAD->saved_ipl = ipl;
    +        assert(THREAD != NULL);
    +
    +        fpu_cleanup();
    +
    +        irq_spinlock_lock(&THREAD->lock, false);
    +        THREAD->state = new_state;
    +
    +        /* Update thread kernel accounting */
    +        THREAD->kcycles += get_cycle() - THREAD->last_cycle;
    +
    +        if (!context_save(&THREAD->saved_context)) {
    +                /*
    +                 * This is the place where threads leave scheduler();
    +                 */
    +
    +                irq_spinlock_unlock(&THREAD->lock, false);
    +                interrupts_restore(ipl);
    +                return;
             }

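    scheduler_enter() now owns the whole handoff protocol: it disables
    interrupts, takes THREAD->lock, and records the new state itself, which
    is what lets the call sites in thread.c below drop their own locking. The
    !context_save() branch uses the setjmp-like contract of context_save():
    it returns nonzero when the context is first saved and zero when the
    thread is later resumed through context_restore(), so the if body is
    where a rescheduled thread wakes up. And because ipl is now an ordinary
    local that survives on the thread's own stack across that save/resume,
    the per-thread saved_ipl field (removed from thread.h above) becomes
    unnecessary. A condensed sketch of the returns-twice pattern, assuming
    that contract:

        if (!context_save(&THREAD->saved_context)) {
                /* Second "return": this thread was picked to run again. */
                irq_spinlock_unlock(&THREAD->lock, false);
                interrupts_restore(ipl);  /* ipl lived on this stack */
                return;
        }
        /* First return: fall through to the scheduler's own stack. */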
     
    …
             assert(interrupts_disabled());

    +        if (atomic_load(&haltstate))
    +                halt();
    +
             if (THREAD) {
                     after_thread_ran_arch();
     
    …
                      *
                      */
    -                scheduler();
    +                thread_yield();
             } else {
                     /*
  • kernel/generic/src/proc/thread.c

    r8996582 → r151c050

             current_initialize((current_t *) thread->kstack);

    -        ipl_t ipl = interrupts_disable();
    -        thread->saved_ipl = interrupts_read();
    -        interrupts_restore(ipl);
    -
             str_cpy(thread->name, THREAD_NAME_BUFLEN, name);

    …
             }

    -        irq_spinlock_lock(&THREAD->lock, true);
    -        THREAD->state = Exiting;
    -        irq_spinlock_unlock(&THREAD->lock, true);
    -
    -        scheduler();
    -
    -        panic("should never be reached");
    +        scheduler_enter(Exiting);
    +        unreachable();
     }

    …
             }

    -        ipl_t ipl = interrupts_disable();
    -        irq_spinlock_lock(&THREAD->lock, false);
    -        THREAD->state = Sleeping;
    -        scheduler_locked(ipl);
    +        scheduler_enter(Sleeping);

             if (deadline != DEADLINE_NEVER && !timeout_unregister(&timeout)) {
    …
             (void) waitq_sleep_timeout(&wq, usec);
    +}
    +
    +/** Allow other threads to run. */
    +void thread_yield(void)
    +{
    +        assert(THREAD != NULL);
    +        scheduler_enter(Running);
     }

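    thread_yield() gives kernel code a named way to offer the CPU while
    staying Running; the clock tick handler below uses it for preemption. A
    hypothetical polling loop illustrating the intended use (device_ready()
    is an assumed placeholder, not part of this changeset):

        while (!device_ready())
                thread_yield();  /* reschedule; the thread stays Running */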
  • kernel/generic/src/time/clock.c

    r8996582 → r151c050

             if (THREAD) {
                     if (current_clock_tick >= CPU_LOCAL->preempt_deadline && PREEMPTION_ENABLED) {
    -                        scheduler();
    +                        thread_yield();
     #ifdef CONFIG_UDEBUG
                             /*