From 7132f64c17437d0d7a5f6607b09ded13c53dfef9 Mon Sep 17 00:00:00 2001
From: Wanpeng Li
Date: Thu, 19 Nov 2015 18:11:58 +0800
Subject: [PATCH] sched/deadline: fix earliest_dl.next logic

earliest_dl.next should cache the deadline of the earliest ready task
that is also enqueued in the pushable rbtree, because the pull
algorithm uses this information to find candidates for migration: if
the earliest_dl.next deadline of the source rq is earlier than the
earliest_dl.curr deadline of the destination rq, a task from the
source rq can be pulled.

However, the current implementation only guarantees that
earliest_dl.next is the deadline of the next ready task, not of the
next pushable task, which can result in taking both rqs' locks and
then finding nothing to migrate because of affinity constraints. In
addition, the current logic doesn't update the next candidate for
pushing in pick_next_task_dl(), even if the running task is never
eligible for pushing.

This patch fixes both problems by updating earliest_dl.next when a
pushable dl task is enqueued/dequeued, similar to what we already do
for RT.

Signed-off-by: Wanpeng Li
---
 kernel/sched/deadline.c | 63 ++++++++++---------------------------------------
 1 file changed, 12 insertions(+), 51 deletions(-)

diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 8b0a15e..9c2047f 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -87,6 +87,8 @@ void init_dl_rq(struct dl_rq *dl_rq)
 
 #ifdef CONFIG_SMP
 
+static struct task_struct *pick_next_pushable_dl_task(struct rq *rq);
+
 static inline int dl_overloaded(struct rq *rq)
 {
 	return atomic_read(&rq->rd->dlo_count);
@@ -181,11 +183,15 @@ static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 
 	rb_link_node(&p->pushable_dl_tasks, parent, link);
 	rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
+
+	if (dl_time_before(p->dl.deadline, dl_rq->earliest_dl.next))
+		dl_rq->earliest_dl.next = p->dl.deadline;
 }
 
 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 {
 	struct dl_rq *dl_rq = &rq->dl;
+	struct task_struct *next_pushable;
 
 	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
 		return;
@@ -199,6 +205,12 @@ static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 
 	rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
 	RB_CLEAR_NODE(&p->pushable_dl_tasks);
+
+	next_pushable = pick_next_pushable_dl_task(rq);
+	if (next_pushable)
+		dl_rq->earliest_dl.next = next_pushable->dl.deadline;
+	else
+		dl_rq->earliest_dl.next = 0;
 }
 
 static inline int has_pushable_dl_tasks(struct rq *rq)
@@ -782,42 +794,14 @@ static void update_curr_dl(struct rq *rq)
 
 #ifdef CONFIG_SMP
 
-static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu);
-
-static inline u64 next_deadline(struct rq *rq)
-{
-	struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu);
-
-	if (next && dl_prio(next->prio))
-		return next->dl.deadline;
-	else
-		return 0;
-}
-
 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
 {
 	struct rq *rq = rq_of_dl_rq(dl_rq);
 
 	if (dl_rq->earliest_dl.curr == 0 ||
 	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
-		/*
-		 * If the dl_rq had no -deadline tasks, or if the new task
-		 * has shorter deadline than the current one on dl_rq, we
-		 * know that the previous earliest becomes our next earliest,
-		 * as the new task becomes the earliest itself.
-		 */
-		dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
 		dl_rq->earliest_dl.curr = deadline;
 		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
-	} else if (dl_rq->earliest_dl.next == 0 ||
-		   dl_time_before(deadline, dl_rq->earliest_dl.next)) {
-		/*
-		 * On the other hand, if the new -deadline task has a
-		 * a later deadline than the earliest one on dl_rq, but
-		 * it is earlier than the next (if any), we must
-		 * recompute the next-earliest.
-		 */
-		dl_rq->earliest_dl.next = next_deadline(rq);
 	}
 }
 
@@ -839,7 +823,6 @@ static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
 
 		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
 		dl_rq->earliest_dl.curr = entry->deadline;
-		dl_rq->earliest_dl.next = next_deadline(rq);
 		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
 	}
 }
@@ -1274,28 +1257,6 @@ static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
 	return 0;
 }
 
-/* Returns the second earliest -deadline task, NULL otherwise */
-static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
-{
-	struct rb_node *next_node = rq->dl.rb_leftmost;
-	struct sched_dl_entity *dl_se;
-	struct task_struct *p = NULL;
-
-next_node:
-	next_node = rb_next(next_node);
-	if (next_node) {
-		dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
-		p = dl_task_of(dl_se);
-
-		if (pick_dl_task(rq, p, cpu))
-			return p;
-
-		goto next_node;
-	}
-
-	return NULL;
-}
-
 /*
  * Return the earliest pushable rq's task, which is suitable to be executed
  * on the CPU, NULL otherwise:
-- 
1.9.1
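
A note on the pull-side condition described in the changelog, for
readers who want to see it in isolation: the decision reduces to a
wrap-safe deadline comparison between the two runqueues. Below is a
standalone userspace sketch; the names (dl_rq_sketch, dl_before,
should_pull) are made up for illustration and this is not the kernel
code, but it shows why earliest_dl.next must track pushable tasks
only.

#include <stdio.h>
#include <stdint.h>

/* Cut-down view of the fields the pull decision looks at. */
struct dl_rq_sketch {
	uint64_t earliest_dl_curr;	/* earliest ready deadline; 0 = none */
	uint64_t earliest_dl_next;	/* earliest pushable deadline; 0 = none */
};

/* Wrap-safe "a is earlier than b", same idea as dl_time_before(). */
static int dl_before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

/*
 * Pulling from src into dst is worthwhile only if src advertises a
 * pushable task whose deadline beats dst's current earliest one.
 */
static int should_pull(const struct dl_rq_sketch *dst,
		       const struct dl_rq_sketch *src)
{
	return src->earliest_dl_next != 0 &&
	       dl_before(src->earliest_dl_next, dst->earliest_dl_curr);
}

int main(void)
{
	struct dl_rq_sketch dst = { .earliest_dl_curr = 2000 };
	struct dl_rq_sketch src = { .earliest_dl_curr =  500,
				    .earliest_dl_next = 1000 };

	/* Prints 1: src's earliest pushable deadline (1000) beats dst's 2000. */
	printf("pull? %d\n", should_pull(&dst, &src));
	return 0;
}

With the old semantics, src's earliest_dl.next could belong to a ready
task that is pinned to src's CPU, so a check like should_pull() could
say yes, both rq locks would be taken, and the migration scan would
come up empty; once earliest_dl.next tracks only pushable tasks, that
false positive goes away.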