sched: do not allocate window cpu arrays separately

These window cpu arrays are allocated and freed extremely frequently: once for every task that is created and torn down.

Instead, size them statically with CONFIG_NR_CPUS so that they are allocated as part of struct ravg itself, with no separate kcalloc()/kfree().

Note that this will break WALT debug trace events, which expect these fields to be NULL pointers for exiting tasks.

Change-Id: I8f67bb00fb916e04bfc954d812a3b99a3a5495c2
Signed-off-by: Park Ju Hyung <qkrwngud825@gmail.com>
Signed-off-by: Pranav Vashi <neobuddy89@gmail.com>

Author:    Park Ju Hyung <qkrwngud825@gmail.com>
Date:      2020-03-18 18:54:42 +09:00
Committer: Pranav Vashi <neobuddy89@gmail.com>
Commit:    c89b15482e
Parent:    06e0bcff04

 4 files changed, 16 insertions(+), 43 deletions(-)
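For context, here is a minimal standalone sketch of the layout change (not the kernel code: the CONFIG_NR_CPUS value and the stripped-down structs are illustrative, only the field names come from the diff below):

#include <stdint.h>

#define CONFIG_NR_CPUS 8        /* example value; the real one comes from Kconfig */

/* Before: two pointers, each backed by a separate kcalloc() in
 * init_new_task_load() and a kfree() in free_task_load_ptrs(). */
struct ravg_before {
        uint32_t *curr_window_cpu;
        uint32_t *prev_window_cpu;
};

/* After: fixed-size arrays embedded in the struct, so they are
 * allocated and freed together with the enclosing task_struct and
 * the per-task allocator round-trips disappear. */
struct ravg_after {
        uint32_t curr_window_cpu[CONFIG_NR_CPUS];
        uint32_t prev_window_cpu[CONFIG_NR_CPUS];
};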


@@ -558,7 +558,6 @@ extern u32 sched_get_init_task_load(struct task_struct *p);
 extern void sched_update_cpu_freq_min_max(const cpumask_t *cpus, u32 fmin,
                                           u32 fmax);
 extern int sched_set_boost(int enable);
-extern void free_task_load_ptrs(struct task_struct *p);
 
 #define RAVG_HIST_SIZE_MAX 5
 #define NUM_BUSY_BUCKETS 10
@@ -602,7 +601,7 @@ struct ravg {
         u32 sum, demand;
         u32 coloc_demand;
         u32 sum_history[RAVG_HIST_SIZE_MAX];
-        u32 *curr_window_cpu, *prev_window_cpu;
+        u32 curr_window_cpu[CONFIG_NR_CPUS], prev_window_cpu[CONFIG_NR_CPUS];
         u32 curr_window, prev_window;
         u16 active_windows;
         u32 pred_demand;
@@ -623,7 +622,6 @@ static inline int sched_set_boost(int enable)
{
return -EINVAL;
}
static inline void free_task_load_ptrs(struct task_struct *p) { }
static inline void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
u32 fmin, u32 fmax) { }
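The trade-off of the struct change above (my arithmetic, not stated in the commit): the embedded arrays are sized by the compile-time CONFIG_NR_CPUS rather than the boot-time nr_cpu_ids the kcalloc() calls used, so every task now carries 2 * CONFIG_NR_CPUS * sizeof(u32) bytes even on systems that boot with fewer CPUs. A quick check, assuming an illustrative CONFIG_NR_CPUS of 8:

#include <stdint.h>
#include <stdio.h>

#define CONFIG_NR_CPUS 8        /* assumed example; set by Kconfig in a real build */

int main(void)
{
        /* Fixed per-task cost of the two embedded window arrays,
         * independent of how many CPUs are actually online. */
        size_t per_task = 2u * CONFIG_NR_CPUS * sizeof(uint32_t);

        printf("%zu bytes per task\n", per_task);       /* prints: 64 bytes per task */
        return 0;
}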


@@ -2165,7 +2165,6 @@ bad_fork_cleanup_perf:
         perf_event_free_task(p);
 bad_fork_cleanup_policy:
         lockdep_free_task(p);
-        free_task_load_ptrs(p);
 #ifdef CONFIG_NUMA
         mpol_put(p->mempolicy);
 bad_fork_cleanup_threadgroup_lock:


@@ -7681,7 +7681,6 @@ void sched_exit(struct task_struct *p)
         enqueue_task(rq, p, 0);
         clear_ed_task(p, rq);
         task_rq_unlock(rq, p, &rf);
-        free_task_load_ptrs(p);
 }
 #endif /* CONFIG_SCHED_WALT */


@@ -2031,11 +2031,6 @@ void init_new_task_load(struct task_struct *p)
         memset(&p->ravg, 0, sizeof(struct ravg));
         p->cpu_cycles = 0;
 
-        p->ravg.curr_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32),
-                                          GFP_KERNEL | __GFP_NOFAIL);
-        p->ravg.prev_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32),
-                                          GFP_KERNEL | __GFP_NOFAIL);
-
         if (init_load_pct) {
                 init_load_windows = div64_u64((u64)init_load_pct *
                           (u64)sched_ravg_window, 100);
@@ -2052,46 +2047,28 @@ void init_new_task_load(struct task_struct *p)
         p->misfit = false;
 }
 
-/*
- * kfree() may wakeup kswapd. So this function should NOT be called
- * with any CPU's rq->lock acquired.
- */
-void free_task_load_ptrs(struct task_struct *p)
-{
-        kfree(p->ravg.curr_window_cpu);
-        kfree(p->ravg.prev_window_cpu);
-
-        /*
-         * update_task_ravg() can be called for exiting tasks. While the
-         * function itself ensures correct behavior, the corresponding
-         * trace event requires that these pointers be NULL.
-         */
-        p->ravg.curr_window_cpu = NULL;
-        p->ravg.prev_window_cpu = NULL;
-}
-
 void reset_task_stats(struct task_struct *p)
 {
-        u32 sum = 0;
-        u32 *curr_window_ptr = NULL;
-        u32 *prev_window_ptr = NULL;
+        u32 sum;
+        u32 curr_window_saved[CONFIG_NR_CPUS];
+        u32 prev_window_saved[CONFIG_NR_CPUS];
 
         if (exiting_task(p)) {
                 sum = EXITING_TASK_MARKER;
+                memset(&p->ravg, 0, sizeof(struct ravg));
+                /* Retain EXITING_TASK marker */
+                p->ravg.sum_history[0] = sum;
         } else {
-                curr_window_ptr = p->ravg.curr_window_cpu;
-                prev_window_ptr = p->ravg.prev_window_cpu;
-                memset(curr_window_ptr, 0, sizeof(u32) * nr_cpu_ids);
-                memset(prev_window_ptr, 0, sizeof(u32) * nr_cpu_ids);
+                memcpy(curr_window_saved, p->ravg.curr_window_cpu,
+                       sizeof(curr_window_saved));
+                memcpy(prev_window_saved, p->ravg.prev_window_cpu,
+                       sizeof(prev_window_saved));
+                memset(&p->ravg, 0, sizeof(struct ravg));
+                memcpy(p->ravg.curr_window_cpu, curr_window_saved,
+                       sizeof(curr_window_saved));
+                memcpy(p->ravg.prev_window_cpu, prev_window_saved,
+                       sizeof(prev_window_saved));
         }
-
-        memset(&p->ravg, 0, sizeof(struct ravg));
-        p->ravg.curr_window_cpu = curr_window_ptr;
-        p->ravg.prev_window_cpu = prev_window_ptr;
-
-        /* Retain EXITING_TASK marker */
-        p->ravg.sum_history[0] = sum;
 }
 
 void mark_task_starting(struct task_struct *p)
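
To make the reset_task_stats() rewrite above easier to follow, here is a self-contained sketch of its new non-exiting path (a reduced stand-in struct and an assumed CONFIG_NR_CPUS; not the kernel code). Because the windows now live inside struct ravg, they can no longer be carried across the wipe as two pointers; per the diff, their contents are staged on the stack instead:

#include <stdint.h>
#include <string.h>

#define CONFIG_NR_CPUS 8        /* assumed example value */

struct ravg {                   /* reduced stand-in for the struct in the diff */
        uint32_t sum_history[5];
        uint32_t curr_window_cpu[CONFIG_NR_CPUS];
        uint32_t prev_window_cpu[CONFIG_NR_CPUS];
};

/* Mirrors the new else-branch of reset_task_stats(): save the embedded
 * per-CPU windows, wipe the whole struct, then copy the windows back. */
static void reset_keeping_windows(struct ravg *r)
{
        uint32_t curr_saved[CONFIG_NR_CPUS];
        uint32_t prev_saved[CONFIG_NR_CPUS];

        memcpy(curr_saved, r->curr_window_cpu, sizeof(curr_saved));
        memcpy(prev_saved, r->prev_window_cpu, sizeof(prev_saved));
        memset(r, 0, sizeof(*r));
        memcpy(r->curr_window_cpu, curr_saved, sizeof(curr_saved));
        memcpy(r->prev_window_cpu, prev_saved, sizeof(prev_saved));
}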