Recently, some users have run into errors when adding a new scheduler to the Linux kernel. This can happen for several reasons, which we review below (image: helix979.github.io). To add a new scheduling policy to the Linux kernel, you need to create a new scheduler module (scheduling class). In the SCLS implementation, the CASIO class was added at the top of the module hierarchy, making it the highest-priority class; the scheduler classes are therefore organized hierarchically, as shown in Figure 2 of that write-up.
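
To make the hierarchy concrete: in the 2.6-era kernel each scheduling class points to the next lower-priority class through its .next field, and the core scheduler walks that chain when choosing what to run. The following is a simplified sketch of that walk, loosely modeled on pick_next_task() in kernel/sched.c; the real function has an extra fast path and differs between kernel versions.

/*
 * Sketch of how the core scheduler walks the class hierarchy (2.6.x).
 * sched_class_highest is the top of the chain (normally &rt_sched_class);
 * classes are ordered from highest to lowest priority via ->next.
 */
static inline struct task_struct *pick_next_task(struct rq *rq)
{
	const struct sched_class *class;
	struct task_struct *p;

	/* Fall through the classes until one of them has a runnable task. */
	for (class = sched_class_highest; class; class = class->next) {
		p = class->pick_next_task(rq);
		if (p)
			return p;
	}

	return NULL;	/* never reached: the idle class always returns a task */
}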

The Linux kernel scheduler actually consists of several scheduling classes, each implementing a different scheduling algorithm: rt_sched_class implements the so-called real-time (RT) scheduler, while fair_sched_class implements the Completely Fair Scheduler (CFS).
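
Inside the kernel, the policy a task requests decides which of these classes it is attached to. The snippet below is a simplified sketch modeled on __setscheduler() in kernel/sched.c from the 2.6.x series; helpers such as normal_prio(), rt_mutex_getprio() and set_load_weight() are kernel internals and may differ in other versions.

/*
 * Sketch of how a task's policy is mapped to a scheduling class,
 * modeled on __setscheduler() in kernel/sched.c (2.6.x).
 */
static void __setscheduler(struct rq *rq, struct task_struct *p,
			   int policy, int prio)
{
	p->policy = policy;
	p->rt_priority = prio;
	p->normal_prio = normal_prio(p);
	p->prio = rt_mutex_getprio(p);

	if (rt_prio(p->prio))
		p->sched_class = &rt_sched_class;	/* SCHED_FIFO / SCHED_RR */
	else
		p->sched_class = &fair_sched_class;	/* SCHED_NORMAL / SCHED_BATCH / SCHED_IDLE */

	set_load_weight(p);
}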

In the Linux kernel, the scheduler is also invoked from the periodic timer interrupt. This is called periodic scheduling, and it is needed to prevent tasks that consume many CPU cycles from denying other runnable tasks a fair chance to use the CPU.
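
Concretely, the timer interrupt ends up in scheduler_tick(), which forwards the tick to the task_tick hook of the current task's scheduling class; this is the hook a new scheduler must implement to enforce time slices. A simplified sketch, loosely following kernel/sched.c in the 2.6.x series (clock updates, load accounting and SMP balancing are omitted):

/*
 * Simplified view of the periodic tick path (2.6.x era).
 * The real scheduler_tick() also updates the runqueue clock,
 * CPU load statistics and triggers SMP load balancing.
 */
void scheduler_tick(void)
{
	int cpu = smp_processor_id();
	struct rq *rq = cpu_rq(cpu);
	struct task_struct *curr = rq->curr;

	raw_spin_lock(&rq->lock);
	/* Let the current task's scheduling class account for the tick;
	 * e.g. CFS decides here whether the running task should be preempted. */
	curr->sched_class->task_tick(rq, curr, 0);
	raw_spin_unlock(&rq->lock);
}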

I found the answer to my own question, so I am adding it here. Below is a patch that adds a new scheduling class to the vanilla 2.6.34 kernel. So far I have only compiled the kernel; I fully expect that booting a system built from this EXACT patch will crash, so use it at your own risk 🙂


diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2b7b81d..a2a2b21 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -37,6 +37,7 @@
 #define SCHED_RR		2
 #define SCHED_BATCH		3
 /* SCHED_ISO: reserved but not implemented yet */
+#define SCHED_NEW		4 /* stealing SCHED_ISO */
 #define SCHED_IDLE		5
 /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
 #define SCHED_RESET_ON_FORK     0x40000000
diff --git a/init/Kconfig b/init/Kconfig
index eb77e8c..0055d26 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -23,6 +23,11 @@ config CONSTRUCTORS
 
 menu "General setup"
 
+config SCHED_NEW
+	bool "NEW CPU scheduler"
+	---help---
+	  A completely new scheduler
+
 config EXPERIMENTAL
 	bool "Prompt for development and/or incomplete code/drivers"
 	---help---
diff --git a/kernel/sched.c b/kernel/sched.c
index 3c2a54f..588960d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1931,6 +1931,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
 
 #include "sched_idletask.c"
 #include "sched_fair.c"
+#include "sched_new.c"
 #include "sched_rt.c"
 #ifdef CONFIG_SCHED_DEBUG
 # include "sched_debug.c"
diff --git a/kernel/sched_new.c b/kernel/sched_new.c
new file mode 100644
index 0000000..c2e269e
--- /dev/null
+++ b/kernel/sched_new.c
@@ -0,0 +1,140 @@
+#ifdef CONFIG_SCHED_NEW
+
+/*
+ * Starting out with a simple one-runqueue-per-CPU scheduler.
+ * Don't worry about fairness for now; just get the
+ * interfaces correct.
+ */
+
+static void
+enqueue_task_new(struct rq *rq, struct task_struct *p, int wakeup, bool head)
+{
+}
+
+static void dequeue_task_new(struct rq *rq, struct task_struct *p, int sleep)
+{
+}
+
+static void yield_task_new(struct rq *rq)
+{
+}
+
+static void check_preempt_curr_new(struct rq *rq, struct task_struct *p, int flags)
+{
+}
+
+static struct task_struct *pick_next_task_new(struct rq *rq)
+{
+}
+
+static void put_prev_task_new(struct rq *rq, struct task_struct *p)
+{
+}
+
+#ifdef CONFIG_SMP
+static int select_task_rq_new(struct task_struct *p, int sd_flag, int flags)
+{
+}
+
+static void pre_schedule_new(struct rq *rq, struct task_struct *prev)
+{
+}
+
+static void post_schedule_new(struct rq *rq)
+{
+}
+
+static void task_woken_new(struct rq *rq, struct task_struct *p)
+{
+}
+
+static void task_waking_new(struct rq *this_rq, struct task_struct *task)
+{
+}
+
+static void set_cpus_allowed_new(struct task_struct *p,
+				 const struct cpumask *new_mask)
+{
+}
+
+/* Assumes rq->lock is held */
+static void rq_online_new(struct rq *rq)
+{
+}
+
+/* Assumes rq->lock is held */
+static void rq_offline_new(struct rq *rq)
+{
+}
+#endif /* CONFIG_SMP */
+
+static void set_curr_task_new(struct rq *rq)
+{
+}
+
+static void task_tick_new(struct rq *rq, struct task_struct *p, int queued)
+{
+}
+
+static void task_fork_new(struct task_struct *p)
+{
+}
+
+static void switched_from_new(struct rq *rq, struct task_struct *p,
+			      int running)
+{
+}
+
+static void switched_to_new(struct rq *this_rq, struct task_struct *task,
+			    int running)
+{
+}
+
+static void prio_changed_new(struct rq *rq, struct task_struct *p,
+			     int oldprio, int running)
+{
+}
+
+static unsigned int get_rr_interval_new(struct rq *rq, struct task_struct *task)
+{
+}
+
+static const struct sched_class new_sched_class = {
+	.next			= &fair_sched_class,
+	.enqueue_task		= enqueue_task_new,
+	.dequeue_task		= dequeue_task_new,
+	.yield_task		= yield_task_new,
+
+	.check_preempt_curr	= check_preempt_curr_new,
+
+	.pick_next_task		= pick_next_task_new,
+	.put_prev_task		= put_prev_task_new,
+
+#ifdef CONFIG_SMP
+	.select_task_rq		= select_task_rq_new,
+
+	.pre_schedule		= pre_schedule_new,
+	.post_schedule		= post_schedule_new,
+
+	.task_waking		= task_waking_new,
+	.task_woken		= task_woken_new,
+
+	.set_cpus_allowed	= set_cpus_allowed_new,
+
+	.rq_online		= rq_online_new,
+	.rq_offline		= rq_offline_new,
+#endif
+
+	.set_curr_task		= set_curr_task_new,
+	.task_tick		= task_tick_new,
+	.task_fork		= task_fork_new,
+
+	.switched_from		= switched_from_new,
+	.switched_to		= switched_to_new,
+
+	.prio_changed		= prio_changed_new,
+
+	.get_rr_interval	= get_rr_interval_new,
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	.moved_group		= NULL,
+#endif
+};
+
+#endif /* CONFIG_SCHED_NEW */
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index b5b920a..aaf4beb 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1731,7 +1731,11 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
 }
 
 static const struct sched_class rt_sched_class = {
+#ifdef CONFIG_SCHED_NEW
+	.next			= &new_sched_class,
+#else
 	.next			= &fair_sched_class,
+#endif /* CONFIG_SCHED_NEW */
 	.enqueue_task		= enqueue_task_rt,
 	.dequeue_task		= dequeue_task_rt,
 	.yield_task		= yield_task_rt,
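
Once the stubs above are actually filled in and the kernel is rebuilt with CONFIG_SCHED_NEW, a user-space task would request the new policy the same way it requests any other one, via sched_setscheduler(). The sketch below is only an illustration: the value 4 matches the SCHED_NEW definition added in the patch, glibc has no symbolic name for it, and the patch as shown does not yet teach the kernel's policy-validation code to accept it, so further changes are needed before this call can succeed.

#include <sched.h>
#include <stdio.h>

#define SCHED_NEW 4	/* must match the value added to include/linux/sched.h */

int main(void)
{
	struct sched_param param = { .sched_priority = 0 };

	/* Ask the kernel to move this task into the new scheduling class;
	 * fails with EINVAL on a kernel that does not know the policy. */
	if (sched_setscheduler(0, SCHED_NEW, &param) == -1) {
		perror("sched_setscheduler(SCHED_NEW)");
		return 1;
	}

	printf("task is now scheduled by new_sched_class\n");
	return 0;
}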