BACKPORT: time: hrtimer: Introduce hrtimer_next_event_without()

The next set of changes will need to compute the time to the next
hrtimer event over all hrtimers except for the scheduler tick one.

To that end introduce a new helper function,
hrtimer_next_event_without(), for computing the time until the next
hrtimer event over all timers except for one and modify the underlying
code in __hrtimer_next_event_base() to prepare it for being called by
that new function.

No intentional code behavior changes.

Cherry-picked from a59855cd8c613ba4bb95147f6176360d95f75e60

 - Fixed conflict with tick_nohz_stopped_cpu not appearing in the
   hunk context. Trivial fix.

Change-Id: I0dae9e08e19559efae9697800738c5522ab5933f
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Todd Kjos <tkjos@google.com>
Cc: Joel Fernandes <joelaf@google.com>
Cc: Pavan Kondeti <pkondeti@qti.qualcomm.com>
Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
This commit is contained in:
Rafael J. Wysocki
2018-04-03 23:17:00 +02:00
committed by Todd Kjos
parent f69cfc8ef9
commit 6277dd586f
2 changed files with 44 additions and 4 deletions

View File

@@ -405,6 +405,7 @@ static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
}
extern u64 hrtimer_get_next_event(void);
extern u64 hrtimer_next_event_without(const struct hrtimer *exclude);
extern bool hrtimer_active(const struct hrtimer *timer);

View File

@@ -463,7 +463,8 @@ static inline void hrtimer_update_next_timer(struct hrtimer_cpu_base *cpu_base,
#endif
}
static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base,
const struct hrtimer *exclude)
{
struct hrtimer_clock_base *base = cpu_base->clock_base;
unsigned int active = cpu_base->active_bases;
@@ -479,9 +480,24 @@ static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
next = timerqueue_getnext(&base->active);
timer = container_of(next, struct hrtimer, node);
if (timer == exclude) {
/* Get to the next timer in the queue. */
struct rb_node *rbn = rb_next(&next->node);
next = rb_entry_safe(rbn, struct timerqueue_node, node);
if (!next)
continue;
timer = container_of(next, struct hrtimer, node);
}
expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
if (expires < expires_next) {
expires_next = expires;
/* Skip cpu_base update if a timer is being excluded. */
if (exclude)
continue;
hrtimer_update_next_timer(cpu_base, timer);
}
}
@@ -560,7 +576,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
if (!cpu_base->hres_active)
return;
expires_next = __hrtimer_get_next_event(cpu_base);
expires_next = __hrtimer_get_next_event(cpu_base, NULL);
if (skip_equal && expires_next == cpu_base->expires_next)
return;
@@ -1076,7 +1092,30 @@ u64 hrtimer_get_next_event(void)
raw_spin_lock_irqsave(&cpu_base->lock, flags);
if (!__hrtimer_hres_active(cpu_base))
expires = __hrtimer_get_next_event(cpu_base);
expires = __hrtimer_get_next_event(cpu_base, NULL);
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
return expires;
}
/**
* hrtimer_next_event_without - time until next expiry event w/o one timer
* @exclude: timer to exclude
*
* Returns the next expiry time over all timers except for the @exclude one or
* KTIME_MAX if none of them is pending.
*/
u64 hrtimer_next_event_without(const struct hrtimer *exclude)
{
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
u64 expires = KTIME_MAX;
unsigned long flags;
raw_spin_lock_irqsave(&cpu_base->lock, flags);
if (__hrtimer_hres_active(cpu_base))
expires = __hrtimer_get_next_event(cpu_base, exclude);
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
@@ -1318,7 +1357,7 @@ retry:
__hrtimer_run_queues(cpu_base, now);
/* Reevaluate the clock bases for the next expiry */
expires_next = __hrtimer_get_next_event(cpu_base);
expires_next = __hrtimer_get_next_event(cpu_base, NULL);
/*
* Store the new expiry value so the migration code can verify
* against it.