Merge android12-5.10 into android12-5.10-lts

This merges the android12-5.10 branch into the -lts branch, catching
it up with the latest changes in there.

It contains the following commits:

* 29e5b30910 Merge tag 'android12-5.10.240_r00' into android12-5.10
* f75a674cd8 BACKPORT: FROMGIT: f2fs: add sysfs entry for effective lookup mode
* 34d1a222d4 BACKPORT: FROMGIT: f2fs: add lookup_mode mount option
* 1ee9b67d67 ANDROID: 16K: Allocate pad vma on the stack
* 881ba9d97f ANDROID: 16K: Don't copy data vma for maps/smaps output
* f405d5f913 ANDROID: GKI: Update oplus symbol list
* 01179fa874 ANDROID: vendor_hooks: add hooks in cpu_cgroup subsystem
* ba270f8f88 ANDROID: vendor_hooks: Add hooks in reweight_entity
* cbcbb4709f ANDROID: GKI: Export css_task_iter_start()
* 55f3f18177 UPSTREAM: regulator: core: Fix deadlock in create_regulator()
* be7fc88ddf UPSTREAM: coresight-etm4x: add isb() before reading the TRCSTATR
* e386309e11 UPSTREAM: usb: gadget: f_uac2: Fix incorrect setting of bNumEndpoints
* e715fae579 UPSTREAM: f2fs: fix to avoid use GC_AT when setting gc_mode as GC_URGENT_LOW or GC_URGENT_MID
* a6fd2c699e UPSTREAM: usb: typec: fix unreleased fwnode_handle in typec_port_register_altmodes()
* bc21535289 UPSTREAM: xhci: Mitigate failed set dequeue pointer commands
* 270f4894ad ANDROID: refresh ABI following type change
* beaad7f495 UPSTREAM: net/sched: Always pass notifications when child class becomes empty
* 534bbffaa6 ANDROID: bpf: do not fail to load if log is full
* 8d1ac3f3fc BACKPORT: Add support for PIO p flag

Change-Id: Ida6ed6266bba85ea86d89bb635d930321a41140d
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
Greg Kroah-Hartman
2025-09-01 11:50:55 +00:00
31 changed files with 2207 additions and 1811 deletions

View File

@@ -277,10 +277,13 @@ Description: Do background GC aggressively when set. Set to 0 by default.
GC approach and turns SSR mode on.
gc urgent low(2): lowers the bar of checking I/O idling in
order to process outstanding discard commands and GC a
little bit aggressively. uses cost benefit GC approach.
little bit aggressively. always uses cost benefit GC approach,
and will override age-threshold GC approach if ATGC is enabled
at the same time.
gc urgent mid(3): does GC forcibly in a period of given
gc_urgent_sleep_time and executes a mid level of I/O idling check.
uses cost benefit GC approach.
always uses cost benefit GC approach, and will override
age-threshold GC approach if ATGC is enabled at the same time.
What: /sys/fs/f2fs/<disk>/gc_urgent_sleep_time
Date: August 2017
@@ -551,3 +554,18 @@ Description: This is a read-only entry to show the value of sb.s_encoding_flags,
SB_ENC_STRICT_MODE_FL 0x00000001
SB_ENC_NO_COMPAT_FALLBACK_FL 0x00000002
============================ ==========
What: /sys/fs/f2fs/<disk>/effective_lookup_mode
Date: August 2025
Contact: "Daniel Lee" <chullee@google.com>
Description:
This is a read-only entry to show the effective directory lookup mode
F2FS is currently using for casefolded directories.
This considers both the "lookup_mode" mount option and the on-disk
encoding flag, SB_ENC_NO_COMPAT_FALLBACK_FL.
Possible values are:
- "perf": Hash-only lookup.
- "compat": Hash-based lookup with a linear search fallback enabled
- "auto:perf": lookup_mode is auto and fallback is disabled on-disk
- "auto:compat": lookup_mode is auto and fallback is enabled on-disk

View File

@@ -309,7 +309,25 @@ age_extent_cache Enable an age extent cache based on rb-tree. It records
data block update frequency of the extent per inode, in
order to provide better temperature hints for data block
allocation.
======================== ============================================================
lookup_mode=%s Control the directory lookup behavior for casefolded
directories. This option has no effect on directories
that do not have the casefold feature enabled.
================== ========================================
Value Description
================== ========================================
perf (Default) Enforces a hash-only lookup.
The linear search fallback is always
disabled, ignoring the on-disk flag.
compat Enables the linear search fallback for
compatibility with directory entries
created by older kernels that used a
different case-folding algorithm.
This mode ignores the on-disk flag.
auto F2FS determines the mode based on the
on-disk `SB_ENC_NO_COMPAT_FALLBACK_FL`
flag.
================== ========================================
Debugfs Entries
===============

View File

@@ -1918,6 +1918,20 @@ accept_ra_pinfo - BOOLEAN
- enabled if accept_ra is enabled.
- disabled if accept_ra is disabled.
ra_honor_pio_pflag - BOOLEAN
The Prefix Information Option P-flag indicates the network can
allocate a unique IPv6 prefix per client using DHCPv6-PD.
This sysctl can be enabled when a userspace DHCPv6-PD client
is running to cause the P-flag to take effect: i.e. the
P-flag suppresses any effects of the A-flag within the same
PIO. For a given PIO, P=1 and A=1 is treated as A=0.
- If disabled, the P-flag is ignored.
- If enabled, the P-flag will disable SLAAC autoconfiguration
for the given Prefix Information Option.
Default: 0 (disabled)
accept_ra_rt_info_min_plen - INTEGER
Minimum prefix length of Route Information in RA.

File diff suppressed because it is too large Load Diff

View File

@@ -125,3 +125,7 @@ type 'enum binder_work_type' changed
enumerator 'BINDER_WORK_FROZEN_BINDER' (9) was added
... 1 other enumerator(s) added
type 'struct ipv6_devconf' changed
member 'u64 android_kabi_reserved4' was removed
member 'union { struct { __u8 ra_honor_pio_pflag; __u8 padding4[7]; }; struct { u64 android_kabi_reserved4; }; union { }; }' was added

View File

@@ -2318,6 +2318,10 @@
sched_setscheduler
sched_setscheduler_nocheck
sched_show_task
cpu_cgrp_subsys
css_task_iter_end
css_task_iter_next
css_task_iter_start
sched_trace_rd_span
sched_uclamp_used
schedule
@@ -2712,6 +2716,8 @@
__traceiter_android_rvh_dequeue_task_fair
__traceiter_android_rvh_enqueue_task
__traceiter_android_rvh_enqueue_task_fair
__traceiter_android_vh_sched_move_task
__traceiter_android_vh_reweight_entity
__traceiter_android_rvh_find_busiest_group
__traceiter_android_rvh_find_busiest_queue
__traceiter_android_rvh_find_energy_efficient_cpu
@@ -2990,6 +2996,8 @@
__tracepoint_android_rvh_dequeue_task_fair
__tracepoint_android_rvh_enqueue_task
__tracepoint_android_rvh_enqueue_task_fair
__tracepoint_android_vh_sched_move_task
__tracepoint_android_vh_reweight_entity
__tracepoint_android_rvh_find_busiest_group
__tracepoint_android_rvh_find_busiest_queue
__tracepoint_android_rvh_find_energy_efficient_cpu

View File

@@ -413,6 +413,11 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_force_compatible_pre);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_force_compatible_post);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_print_transaction_info);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_setscheduler_uclamp);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_reweight_entity);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sched_move_task);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpu_cgroup_css_alloc_early);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpu_cgroup_css_alloc);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpu_cgroup_css_free);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rproc_recovery);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rproc_recovery_set);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ptype_head);

View File

@@ -1447,18 +1447,20 @@ static void coresight_remove_conns(struct coresight_device *csdev)
}
/**
* coresight_timeout - loop until a bit has changed to a specific register
* state.
* coresight_timeout_action - loop until a bit has changed to a specific register
* state, with a callback after every trial.
* @csa: coresight device access for the device
* @offset: Offset of the register from the base of the device.
* @position: the position of the bit of interest.
* @value: the value the bit should have.
* @cb: Call back after each trial.
*
* Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
* TIMEOUT_US has elapsed, whichever happens first.
*/
int coresight_timeout(struct csdev_access *csa, u32 offset,
int position, int value)
int coresight_timeout_action(struct csdev_access *csa, u32 offset,
int position, int value,
coresight_timeout_cb_t cb)
{
int i;
u32 val;
@@ -1474,7 +1476,8 @@ int coresight_timeout(struct csdev_access *csa, u32 offset,
if (!(val & BIT(position)))
return 0;
}
if (cb)
cb(csa, offset, position, value);
/*
* Delay is arbitrary - the specification doesn't say how long
* we are expected to wait. Extra check required to make sure
@@ -1486,6 +1489,13 @@ int coresight_timeout(struct csdev_access *csa, u32 offset,
return -EAGAIN;
}
EXPORT_SYMBOL_GPL(coresight_timeout_action);
/*
 * coresight_timeout - poll until a register bit reaches the requested
 * state, with no per-trial callback.
 *
 * Kept as the existing exported API; it is now a thin wrapper around
 * coresight_timeout_action() with a NULL callback, so nothing extra runs
 * between polling attempts.
 */
int coresight_timeout(struct csdev_access *csa, u32 offset,
		      int position, int value)
{
	return coresight_timeout_action(csa, offset, position, value, NULL);
}
EXPORT_SYMBOL_GPL(coresight_timeout);
u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset)

View File

@@ -367,6 +367,29 @@ static void etm4_check_arch_features(struct etmv4_drvdata *drvdata,
}
#endif /* CONFIG_ETM4X_IMPDEF_FEATURE */
/*
 * etm4x_sys_ins_barrier - callback invoked between TRCSTATR polling trials.
 *
 * When the trace unit is accessed via system instructions rather than MMIO
 * (csa->io_mem is false), consecutive TRCSTATR reads must be separated by an
 * explicit synchronization barrier (see ARM IHI0064H.b, section 4.3.7).
 * For MMIO access no barrier is needed, so this is a no-op.
 */
static void etm4x_sys_ins_barrier(struct csdev_access *csa, u32 offset, int pos, int val)
{
	if (!csa->io_mem)
		isb();
}
/*
 * etm4x_wait_status - poll TRCSTATR until bit <pos> equals <val>.
 *
 * For MMIO access this is a plain coresight_timeout(). When the trace unit
 * is programmed through system instructions, ARM IHI0064H.b section "4.3.7
 * Synchronization of register updates" requires explicit synchronization
 * between each read of TRCSTATR ("... whenever disabling or enabling the
 * trace unit, a poll of TRCSTATR needs explicit synchronization between
 * each read ..."), so an isb() callback is run after every trial.
 */
static int etm4x_wait_status(struct csdev_access *csa, int pos, int val)
{
	if (csa->io_mem)
		return coresight_timeout(csa, TRCSTATR, pos, val);

	return coresight_timeout_action(csa, TRCSTATR, pos, val,
					etm4x_sys_ins_barrier);
}
static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
{
int i, rc;
@@ -398,7 +421,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
isb();
/* wait for TRCSTATR.IDLE to go up */
if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
if (etm4x_wait_status(csa, TRCSTATR_IDLE_BIT, 1))
dev_err(etm_dev,
"timeout while waiting for Idle Trace Status\n");
if (drvdata->nr_pe)
@@ -489,7 +512,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
isb();
/* wait for TRCSTATR.IDLE to go back down to '0' */
if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
if (etm4x_wait_status(csa, TRCSTATR_IDLE_BIT, 0))
dev_err(etm_dev,
"timeout while waiting for Idle Trace Status\n");
@@ -802,10 +825,25 @@ static void etm4_disable_hw(void *info)
tsb_csync();
etm4x_relaxed_write32(csa, control, TRCPRGCTLR);
/*
* As recommended by section 4.3.7 ("Synchronization when using system
* instructions to program the trace unit") of ARM IHI 0064H.b, the
* self-hosted trace analyzer must perform a Context synchronization
* event between writing to the TRCPRGCTLR and reading the TRCSTATR.
*/
if (!csa->io_mem)
isb();
/* wait for TRCSTATR.PMSTABLE to go to '1' */
if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1))
if (etm4x_wait_status(csa, TRCSTATR_PMSTABLE_BIT, 1))
dev_err(etm_dev,
"timeout while waiting for PM stable Trace Status\n");
/*
* As recommended by section 4.3.7 (Synchronization of register updates)
* of ARM IHI 0064H.b.
*/
isb();
/* read the status of the single shot comparators */
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
config->ss_status[i] =
@@ -1582,7 +1620,7 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
etm4_os_lock(drvdata);
/* wait for TRCSTATR.PMSTABLE to go up */
if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1)) {
if (etm4x_wait_status(csa, TRCSTATR_PMSTABLE_BIT, 1)) {
dev_err(etm_dev,
"timeout while waiting for PM Stable Status\n");
etm4_os_unlock(drvdata);
@@ -1672,7 +1710,7 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
state->trcpdcr = etm4x_read32(csa, TRCPDCR);
/* wait for TRCSTATR.IDLE to go up */
if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) {
if (etm4x_wait_status(csa, TRCSTATR_PMSTABLE_BIT, 1)) {
dev_err(etm_dev,
"timeout while waiting for Idle Trace Status\n");
etm4_os_unlock(drvdata);

View File

@@ -1698,12 +1698,49 @@ static const struct file_operations constraint_flags_fops = {
#define REG_STR_SIZE 64
/*
 * link_and_create_debugfs - create the consumer-facing sysfs link and the
 * per-consumer debugfs entries for @regulator under @rdev.
 *
 * Factored out of create_regulator() so these entries can be created
 * without holding rdev->mutex (NOTE(review): per the backported upstream
 * fix "regulator: core: Fix deadlock in create_regulator()" listed in the
 * commit log — confirm against the upstream change).
 *
 * All failures here are non-fatal: the regulator keeps working without
 * the sysfs link or the debugfs directory.
 */
static void link_and_create_debugfs(struct regulator *regulator, struct regulator_dev *rdev,
				    struct device *dev)
{
	int err = 0;

	if (dev) {
		regulator->dev = dev;

		/* Add a link to the device sysfs entry */
		err = sysfs_create_link_nowarn(&rdev->dev.kobj, &dev->kobj,
					       regulator->supply_name);
		if (err) {
			rdev_dbg(rdev, "could not add device link %s: %pe\n",
				 dev->kobj.name, ERR_PTR(err));
			/* non-fatal */
		}
	}

	/*
	 * -EEXIST means this consumer already has entries (duplicate supply
	 * name); skip creating a second debugfs dir. Any other link error,
	 * or no dev at all (err still 0), falls through to creation.
	 */
	if (err != -EEXIST) {
		regulator->debugfs = debugfs_create_dir(regulator->supply_name, rdev->debugfs);
		if (IS_ERR(regulator->debugfs)) {
			rdev_dbg(rdev, "Failed to create debugfs directory\n");
			/* normalize to NULL so the block below is skipped */
			regulator->debugfs = NULL;
		}
	}

	/* Per-consumer load and voltage-request state, read-only */
	if (regulator->debugfs) {
		debugfs_create_u32("uA_load", 0444, regulator->debugfs,
				   &regulator->uA_load);
		debugfs_create_u32("min_uV", 0444, regulator->debugfs,
				   &regulator->voltage[PM_SUSPEND_ON].min_uV);
		debugfs_create_u32("max_uV", 0444, regulator->debugfs,
				   &regulator->voltage[PM_SUSPEND_ON].max_uV);
		debugfs_create_file("constraint_flags", 0444, regulator->debugfs,
				    regulator, &constraint_flags_fops);
	}
}
static struct regulator *create_regulator(struct regulator_dev *rdev,
struct device *dev,
const char *supply_name)
{
struct regulator *regulator;
int err = 0;
lockdep_assert_held_once(&rdev->mutex.base);
@@ -1736,38 +1773,6 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
list_add(&regulator->list, &rdev->consumer_list);
if (dev) {
regulator->dev = dev;
/* Add a link to the device sysfs entry */
err = sysfs_create_link_nowarn(&rdev->dev.kobj, &dev->kobj,
supply_name);
if (err) {
rdev_dbg(rdev, "could not add device link %s: %pe\n",
dev->kobj.name, ERR_PTR(err));
/* non-fatal */
}
}
if (err != -EEXIST) {
regulator->debugfs = debugfs_create_dir(supply_name, rdev->debugfs);
if (IS_ERR(regulator->debugfs)) {
rdev_dbg(rdev, "Failed to create debugfs directory\n");
regulator->debugfs = NULL;
}
}
if (regulator->debugfs) {
debugfs_create_u32("uA_load", 0444, regulator->debugfs,
&regulator->uA_load);
debugfs_create_u32("min_uV", 0444, regulator->debugfs,
&regulator->voltage[PM_SUSPEND_ON].min_uV);
debugfs_create_u32("max_uV", 0444, regulator->debugfs,
&regulator->voltage[PM_SUSPEND_ON].max_uV);
debugfs_create_file("constraint_flags", 0444, regulator->debugfs,
regulator, &constraint_flags_fops);
}
/*
* Check now if the regulator is an always on regulator - if
* it is then we don't need to do nearly so much work for
@@ -1996,6 +2001,9 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
regulator_unlock_two(rdev, r, &ww_ctx);
/* rdev->supply was created in set_supply() */
link_and_create_debugfs(rdev->supply, r, &rdev->dev);
/*
* In set_machine_constraints() we may have turned this regulator on
* but we couldn't propagate to the supply if it hadn't been resolved
@@ -2119,6 +2127,8 @@ struct regulator *_regulator_get(struct device *dev, const char *id,
return regulator;
}
link_and_create_debugfs(regulator, rdev, dev);
rdev->open_count++;
if (get_type == EXCLUSIVE_GET) {
rdev->exclusive = 1;

View File

@@ -1131,6 +1131,7 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
uac2->as_in_alt = 0;
}
std_ac_if_desc.bNumEndpoints = 0;
if (FUOUT_EN(uac2_opts) || FUIN_EN(uac2_opts)) {
uac2->int_ep = usb_ep_autoconfig(gadget, &fs_ep_int_desc);
if (!uac2->int_ep) {

View File

@@ -1039,7 +1039,7 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
td_to_noop(xhci, ring, cached_td, false);
cached_td->cancel_status = TD_CLEARED;
}
td_to_noop(xhci, ring, td, false);
td->cancel_status = TD_CLEARING_CACHE;
cached_td = td;
break;

View File

@@ -2041,6 +2041,7 @@ void typec_port_register_altmodes(struct typec_port *port,
altmodes[index] = alt;
index++;
}
fwnode_handle_put(altmodes_node);
}
EXPORT_SYMBOL_GPL(typec_port_register_altmodes);

View File

@@ -16,6 +16,21 @@
#include "xattr.h"
#include <trace/events/f2fs.h>
static inline bool f2fs_should_fallback_to_linear(struct inode *dir)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
switch (f2fs_get_lookup_mode(sbi)) {
case LOOKUP_PERF:
return false;
case LOOKUP_COMPAT:
return true;
case LOOKUP_AUTO:
return !sb_no_casefold_compat_fallback(sbi->sb);
}
return false;
}
#ifdef CONFIG_UNICODE
extern struct kmem_cache *f2fs_cf_name_slab;
#endif
@@ -438,7 +453,7 @@ start_find_entry:
out:
#if IS_ENABLED(CONFIG_UNICODE)
if (!sb_no_casefold_compat_fallback(dir->i_sb) &&
if (f2fs_should_fallback_to_linear(dir) &&
IS_CASEFOLDED(dir) && !de && use_hash) {
use_hash = false;
goto start_find_entry;

View File

@@ -4553,6 +4553,47 @@ static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
return false;
}
/* Directory lookup behavior for casefolded directories ("lookup_mode=") */
enum f2fs_lookup_mode {
	LOOKUP_PERF,	/* hash-only lookup, never fall back to linear scan */
	LOOKUP_COMPAT,	/* always allow the linear-search fallback */
	LOOKUP_AUTO,	/* follow the on-disk SB_ENC_NO_COMPAT_FALLBACK_FL flag */
};

/*
 * For bit-packing in f2fs_mount_info->alloc_mode
 *
 * Bit 0 holds the allocation mode and bits 1-2 hold the lookup mode, both
 * stored in the single existing alloc_mode field.
 * NOTE(review): presumably packed this way to avoid growing
 * f2fs_mount_info on this ABI-frozen branch — confirm against the KMI
 * requirements.
 */
#define ALLOC_MODE_BITS		1
#define LOOKUP_MODE_BITS	2
#define ALLOC_MODE_SHIFT	0
#define LOOKUP_MODE_SHIFT	(ALLOC_MODE_SHIFT + ALLOC_MODE_BITS)
#define ALLOC_MODE_MASK		(((1 << ALLOC_MODE_BITS) - 1) << ALLOC_MODE_SHIFT)
#define LOOKUP_MODE_MASK	(((1 << LOOKUP_MODE_BITS) - 1) << LOOKUP_MODE_SHIFT)

/* Extract the allocation mode (ALLOC_MODE_DEFAULT/ALLOC_MODE_REUSE) */
static inline int f2fs_get_alloc_mode(struct f2fs_sb_info *sbi)
{
	return (F2FS_OPTION(sbi).alloc_mode & ALLOC_MODE_MASK) >> ALLOC_MODE_SHIFT;
}

/* Store @mode in the alloc-mode bits, preserving the lookup-mode bits */
static inline void f2fs_set_alloc_mode(struct f2fs_sb_info *sbi, int mode)
{
	F2FS_OPTION(sbi).alloc_mode &= ~ALLOC_MODE_MASK;
	F2FS_OPTION(sbi).alloc_mode |= (mode << ALLOC_MODE_SHIFT);
}

/* Extract the casefold lookup mode packed alongside the alloc mode */
static inline enum f2fs_lookup_mode f2fs_get_lookup_mode(struct f2fs_sb_info *sbi)
{
	return (F2FS_OPTION(sbi).alloc_mode & LOOKUP_MODE_MASK) >> LOOKUP_MODE_SHIFT;
}

/* Store @mode in the lookup-mode bits, preserving the alloc-mode bits */
static inline void f2fs_set_lookup_mode(struct f2fs_sb_info *sbi,
					enum f2fs_lookup_mode mode)
{
	F2FS_OPTION(sbi).alloc_mode &= ~LOOKUP_MODE_MASK;
	F2FS_OPTION(sbi).alloc_mode |= (mode << LOOKUP_MODE_SHIFT);
}
#define EFSBADCRC EBADMSG /* Bad CRC detected */
#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */

View File

@@ -206,6 +206,8 @@ static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
switch (sbi->gc_mode) {
case GC_IDLE_CB:
case GC_URGENT_LOW:
case GC_URGENT_MID:
gc_mode = GC_CB;
break;
case GC_IDLE_GREEDY:

View File

@@ -2621,7 +2621,7 @@ static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
return SIT_I(sbi)->last_victim[ALLOC_NEXT];
/* find segments from 0 to reuse freed segments */
if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
if (f2fs_get_alloc_mode(sbi) == ALLOC_MODE_REUSE)
return 0;
return curseg->segno;

View File

@@ -156,6 +156,7 @@ enum {
Opt_nogc_merge,
Opt_memory_mode,
Opt_age_extent_cache,
Opt_lookup_mode,
Opt_err,
};
@@ -233,6 +234,7 @@ static match_table_t f2fs_tokens = {
{Opt_nogc_merge, "nogc_merge"},
{Opt_memory_mode, "memory=%s"},
{Opt_age_extent_cache, "age_extent_cache"},
{Opt_lookup_mode, "lookup_mode=%s"},
{Opt_err, NULL},
};
@@ -954,9 +956,9 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
return -ENOMEM;
if (!strcmp(name, "default")) {
F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
f2fs_set_alloc_mode(sbi, ALLOC_MODE_DEFAULT);
} else if (!strcmp(name, "reuse")) {
F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
f2fs_set_alloc_mode(sbi, ALLOC_MODE_REUSE);
} else {
kfree(name);
return -EINVAL;
@@ -1171,6 +1173,23 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
}
kfree(name);
break;
case Opt_lookup_mode:
name = match_strdup(&args[0]);
if (!name)
return -ENOMEM;
if (!strcmp(name, "perf")) {
f2fs_set_lookup_mode(sbi, LOOKUP_PERF);
} else if (!strcmp(name, "compat")) {
f2fs_set_lookup_mode(sbi, LOOKUP_COMPAT);
} else if (!strcmp(name, "auto")) {
f2fs_set_lookup_mode(sbi, LOOKUP_AUTO);
} else {
kfree(name);
return -EINVAL;
}
kfree(name);
break;
default:
f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
p);
@@ -1899,9 +1918,9 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
if (sbi->sb->s_flags & SB_INLINECRYPT)
seq_puts(seq, ",inlinecrypt");
if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
if (f2fs_get_alloc_mode(sbi) == ALLOC_MODE_DEFAULT)
seq_printf(seq, ",alloc_mode=%s", "default");
else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
else if (f2fs_get_alloc_mode(sbi) == ALLOC_MODE_REUSE)
seq_printf(seq, ",alloc_mode=%s", "reuse");
if (test_opt(sbi, DISABLE_CHECKPOINT))
@@ -1930,6 +1949,13 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
else if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW)
seq_printf(seq, ",memory=%s", "low");
if (f2fs_get_lookup_mode(sbi) == LOOKUP_PERF)
seq_show_option(seq, "lookup_mode", "perf");
else if (f2fs_get_lookup_mode(sbi) == LOOKUP_COMPAT)
seq_show_option(seq, "lookup_mode", "compat");
else if (f2fs_get_lookup_mode(sbi) == LOOKUP_AUTO)
seq_show_option(seq, "lookup_mode", "auto");
return 0;
}
@@ -1952,6 +1978,11 @@ static void default_options(struct f2fs_sb_info *sbi, bool remount)
F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
if (le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count_main) <=
SMALL_VOLUME_SEGMENTS)
f2fs_set_alloc_mode(sbi, ALLOC_MODE_REUSE);
else
f2fs_set_alloc_mode(sbi, ALLOC_MODE_DEFAULT);
F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
@@ -1983,6 +2014,8 @@ static void default_options(struct f2fs_sb_info *sbi, bool remount)
#endif
f2fs_build_fault_attr(sbi, 0, 0);
f2fs_set_lookup_mode(sbi, LOOKUP_PERF);
}
#ifdef CONFIG_QUOTA

View File

@@ -220,6 +220,22 @@ static ssize_t encoding_flags_show(struct f2fs_attr *a,
le16_to_cpu(F2FS_RAW_SUPER(sbi)->s_encoding_flags));
}
static ssize_t effective_lookup_mode_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
switch (f2fs_get_lookup_mode(sbi)) {
case LOOKUP_PERF:
return sysfs_emit(buf, "perf\n");
case LOOKUP_COMPAT:
return sysfs_emit(buf, "compat\n");
case LOOKUP_AUTO:
if (sb_no_casefold_compat_fallback(sbi->sb))
return sysfs_emit(buf, "auto:perf\n");
return sysfs_emit(buf, "auto:compat\n");
}
return 0;
}
static ssize_t mounted_time_sec_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
@@ -764,6 +780,7 @@ F2FS_GENERAL_RO_ATTR(current_reserved_blocks);
F2FS_GENERAL_RO_ATTR(unusable);
F2FS_GENERAL_RO_ATTR(encoding);
F2FS_GENERAL_RO_ATTR(encoding_flags);
F2FS_GENERAL_RO_ATTR(effective_lookup_mode);
F2FS_GENERAL_RO_ATTR(mounted_time_sec);
F2FS_GENERAL_RO_ATTR(main_blkaddr);
F2FS_GENERAL_RO_ATTR(pending_discard);
@@ -882,6 +899,7 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(current_reserved_blocks),
ATTR_LIST(encoding),
ATTR_LIST(encoding_flags),
ATTR_LIST(effective_lookup_mode),
ATTR_LIST(mounted_time_sec),
#ifdef CONFIG_F2FS_STAT_FS
ATTR_LIST(cp_foreground_calls),

View File

@@ -338,7 +338,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
}
start = vma->vm_start;
end = vma->vm_end;
end = VMA_PAD_START(vma);
show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);
/*
@@ -391,13 +391,12 @@ done:
static int show_map(struct seq_file *m, void *v)
{
struct vm_area_struct *pad_vma = get_pad_vma(v);
struct vm_area_struct *vma = get_data_vma(v);
struct vm_area_struct *vma = v;
if (vma_pages(vma))
show_map_vma(m, vma);
show_map_pad_vma(vma, pad_vma, m, show_map_vma, false);
show_map_pad_vma(vma, m, show_map_vma, false);
return 0;
}
@@ -814,9 +813,10 @@ static void smap_gather_stats(struct vm_area_struct *vma,
struct mem_size_stats *mss, unsigned long start)
{
const struct mm_walk_ops *ops = &smaps_walk_ops;
unsigned long end = VMA_PAD_START(vma);
/* Invalid start */
if (start >= vma->vm_end)
if (start >= end)
return;
#ifdef CONFIG_SHMEM
@@ -836,7 +836,15 @@ static void smap_gather_stats(struct vm_area_struct *vma,
unsigned long shmem_swapped = shmem_swap_usage(vma);
if (!start && (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
!(vma->vm_flags & VM_WRITE))) {
!(vma->vm_flags & VM_WRITE)) &&
/*
* Only if we don't have padding can we use the fast path
* shmem_inode_info->swapped for shmem_swapped.
*
* Else we'll walk the page table to calculate
* shmem_swapped, (excluding the padding region).
*/
end == vma->vm_end) {
mss->swap += shmem_swapped;
} else {
mss->check_shmem_swap = true;
@@ -846,9 +854,9 @@ static void smap_gather_stats(struct vm_area_struct *vma,
#endif
/* mmap_lock is held in m_start */
if (!start)
walk_page_vma(vma, ops, mss);
walk_page_range(vma->vm_mm, vma->vm_start, end, ops, mss);
else
walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss);
walk_page_range(vma->vm_mm, start, end, ops, mss);
}
#define SEQ_PUT_DEC(str, val) \
@@ -895,8 +903,7 @@ static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
static int show_smap(struct seq_file *m, void *v)
{
struct vm_area_struct *pad_vma = get_pad_vma(v);
struct vm_area_struct *vma = get_data_vma(v);
struct vm_area_struct *vma = v;
struct mem_size_stats mss;
memset(&mss, 0, sizeof(mss));
@@ -913,7 +920,7 @@ static int show_smap(struct seq_file *m, void *v)
seq_putc(m, '\n');
}
SEQ_PUT_DEC("Size: ", vma->vm_end - vma->vm_start);
SEQ_PUT_DEC("Size: ", VMA_PAD_START(vma) - vma->vm_start);
SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
SEQ_PUT_DEC(" kB\nMMUPageSize: ", vma_mmu_pagesize(vma));
seq_puts(m, " kB\n");
@@ -928,7 +935,7 @@ static int show_smap(struct seq_file *m, void *v)
show_smap_vma_flags(m, vma);
show_pad:
show_map_pad_vma(vma, pad_vma, m, show_smap, true);
show_map_pad_vma(vma, m, show_smap, true);
return 0;
}

View File

@@ -475,6 +475,10 @@ extern int coresight_enable(struct coresight_device *csdev);
extern void coresight_disable(struct coresight_device *csdev);
extern int coresight_timeout(struct csdev_access *csa, u32 offset,
int position, int value);
typedef void (*coresight_timeout_cb_t) (struct csdev_access *, u32, int, int);
extern int coresight_timeout_action(struct csdev_access *csa, u32 offset,
int position, int value,
coresight_timeout_cb_t cb);
extern int coresight_claim_device(struct coresight_device *csdev);
extern int coresight_claim_device_unlocked(struct coresight_device *csdev);

View File

@@ -84,7 +84,7 @@ struct ipv6_devconf {
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_BACKPORT_OK(4);
ANDROID_KABI_BACKPORT_USE(4, struct { __u8 ra_honor_pio_pflag; __u8 padding4[7]; });
};
struct ipv6_params {

View File

@@ -26,12 +26,7 @@ extern unsigned long vma_pad_pages(struct vm_area_struct *vma);
extern void madvise_vma_pad_pages(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
extern struct vm_area_struct *get_pad_vma(struct vm_area_struct *vma);
extern struct vm_area_struct *get_data_vma(struct vm_area_struct *vma);
extern void show_map_pad_vma(struct vm_area_struct *vma,
struct vm_area_struct *pad,
struct seq_file *m, void *func, bool smaps);
extern void split_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *new,
@@ -57,18 +52,7 @@ static inline void madvise_vma_pad_pages(struct vm_area_struct *vma,
{
}
static inline struct vm_area_struct *get_pad_vma(struct vm_area_struct *vma)
{
return NULL;
}
static inline struct vm_area_struct *get_data_vma(struct vm_area_struct *vma)
{
return vma;
}
static inline void show_map_pad_vma(struct vm_area_struct *vma,
struct vm_area_struct *pad,
struct seq_file *m, void *func, bool smaps)
{
}

View File

@@ -42,10 +42,22 @@ struct prefix_info {
#endif
#if defined(__BIG_ENDIAN_BITFIELD)
__u8 onlink : 1,
autoconf : 1,
autoconf : 1,
# ifdef __GENKSYMS__
reserved : 6;
# else
routeraddr : 1,
preferpd : 1,
reserved : 4;
# endif
#elif defined(__LITTLE_ENDIAN_BITFIELD)
# ifdef __GENKSYMS__
__u8 reserved : 6,
# else
__u8 reserved : 4,
preferpd : 1,
routeraddr : 1,
# endif
autoconf : 1,
onlink : 1;
#else

View File

@@ -402,6 +402,25 @@ DECLARE_HOOK(android_vh_mmput,
DECLARE_HOOK(android_vh_sched_pelt_multiplier,
TP_PROTO(unsigned int old, unsigned int cur, int *ret),
TP_ARGS(old, cur, ret));
DECLARE_HOOK(android_vh_reweight_entity,
TP_PROTO(struct sched_entity *se),
TP_ARGS(se));
struct cgroup_subsys_state;
DECLARE_HOOK(android_vh_sched_move_task,
TP_PROTO(struct task_struct *tsk),
TP_ARGS(tsk));
DECLARE_HOOK(android_vh_cpu_cgroup_css_alloc,
TP_PROTO(struct task_group *tg, struct cgroup_subsys_state *parent_css),
TP_ARGS(tg, parent_css));
DECLARE_HOOK(android_vh_cpu_cgroup_css_alloc_early,
TP_PROTO(struct task_group *parent),
TP_ARGS(parent));
DECLARE_HOOK(android_vh_cpu_cgroup_css_free,
TP_PROTO(struct cgroup_subsys_state *css),
TP_ARGS(css));
/* macro versions of hooks are no longer required */
#endif /* _TRACE_HOOK_SCHED_H */

View File

@@ -12743,8 +12743,14 @@ skip_full_check:
env->verification_time = ktime_get_ns() - start_time;
print_verification_stats(env);
if (log->level && bpf_verifier_log_full(log))
ret = -ENOSPC;
// ANDROID: Do not fail to load if log buffer passed in from userspace
// is too small. The bpf log logic is refactored in the 6.4 kernel
// acknowledging the shortcomings of this approach. Instead of backporting
// the significant changes, simply ignore the fact that the log is full.
// For more information see commit 121664093803: bpf: Switch BPF verifier
// log to be a rotating log by default
//if (log->level && bpf_verifier_log_full(log))
// ret = -ENOSPC;
if (log->level && !log->ubuf) {
ret = -EFAULT;
goto err_release_maps;

View File

@@ -4741,6 +4741,7 @@ void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
spin_unlock_irq(&css_set_lock);
}
EXPORT_SYMBOL_GPL(css_task_iter_start);
/**
* css_task_iter_next - return the next task for the iterator
@@ -4774,6 +4775,7 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
return it->cur_task;
}
EXPORT_SYMBOL_GPL(css_task_iter_next);
/**
* css_task_iter_end - finish task iteration
@@ -4796,6 +4798,7 @@ void css_task_iter_end(struct css_task_iter *it)
if (it->cur_task)
put_task_struct(it->cur_task);
}
EXPORT_SYMBOL_GPL(css_task_iter_end);
static void cgroup_procs_release(struct kernfs_open_file *of)
{

View File

@@ -7956,6 +7956,7 @@ void sched_move_task(struct task_struct *tsk)
struct rq_flags rf;
struct rq *rq;
trace_android_vh_sched_move_task(tsk);
rq = task_rq_lock(tsk, &rf);
update_rq_clock(rq);
@@ -7996,6 +7997,7 @@ cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
struct task_group *tg;
if (!parent) {
trace_android_vh_cpu_cgroup_css_alloc_early(parent);
/* This is early initialization for the top cgroup */
return &root_task_group.css;
}
@@ -8004,6 +8006,8 @@ cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
if (IS_ERR(tg))
return ERR_PTR(-ENOMEM);
trace_android_vh_cpu_cgroup_css_alloc(tg, parent_css);
return &tg->css;
}
@@ -8044,6 +8048,7 @@ static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
* Relies on the RCU grace period between css_released() and this.
*/
sched_free_group(tg);
trace_android_vh_cpu_cgroup_css_free(css);
}
/*
@@ -8883,7 +8888,7 @@ struct cgroup_subsys cpu_cgrp_subsys = {
.early_init = true,
.threaded = true,
};
EXPORT_SYMBOL_GPL(cpu_cgrp_subsys);
#endif /* CONFIG_CGROUP_SCHED */
void dump_cpu_task(int cpu)

View File

@@ -3115,6 +3115,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
update_load_set(&se->load, weight);
trace_android_vh_reweight_entity(se);
#ifdef CONFIG_SMP
do {
u32 divider = get_pelt_divider(&se->avg);

View File

@@ -257,19 +257,11 @@ static const struct vm_operations_struct pad_vma_ops = {
};
/*
* Returns a new VMA representing the padding in @vma, if no padding
* in @vma returns NULL.
* Initialize @pad VMA fields with information from the original @vma.
*/
struct vm_area_struct *get_pad_vma(struct vm_area_struct *vma)
static void init_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *pad)
{
struct vm_area_struct *pad;
if (!is_pgsize_migration_enabled() || !(vma->vm_flags & VM_PAD_MASK))
return NULL;
pad = kzalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
*pad = *vma;
memcpy(pad, vma, sizeof(struct vm_area_struct));
/* Remove file */
pad->vm_file = NULL;
@@ -285,60 +277,34 @@ struct vm_area_struct *get_pad_vma(struct vm_area_struct *vma)
/* Remove padding bits */
pad->vm_flags &= ~VM_PAD_MASK;
return pad;
}
/*
* Returns a new VMA exclusing the padding from @vma; if no padding in
* @vma returns @vma.
* Calls the show_pad_vma_fn on the @pad VMA.
*/
struct vm_area_struct *get_data_vma(struct vm_area_struct *vma)
void show_map_pad_vma(struct vm_area_struct *vma, struct seq_file *m,
void *func, bool smaps)
{
struct vm_area_struct *data;
struct vm_area_struct pad;
if (!is_pgsize_migration_enabled() || !(vma->vm_flags & VM_PAD_MASK))
return vma;
data = kzalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
*data = *vma;
/* Adjust the end to the start of the padding section */
data->vm_end = VMA_PAD_START(data);
return data;
}
/*
* Calls the show_pad_vma_fn on the @pad VMA, and frees the copies of @vma
* and @pad.
*/
void show_map_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *pad,
struct seq_file *m, void *func, bool smaps)
{
if (!pad)
return;
/*
* This cannot happen. If @pad vma was allocated the corresponding
* @vma should have the VM_PAD_MASK bit(s) set.
*/
BUG_ON(!(vma->vm_flags & VM_PAD_MASK));
init_pad_vma(vma, &pad);
/*
* This cannot happen. @pad is a section of the original VMA.
* Therefore @vma cannot be null if @pad is not null.
*/
BUG_ON(!vma);
/* The pad VMA should be anonymous. */
BUG_ON(pad.vm_file);
/* The pad VMA should be PROT_NONE. */
BUG_ON(pad.vm_flags & (VM_READ|VM_WRITE|VM_EXEC));
/* The pad VMA itself cannot have padding; infinite recursion */
BUG_ON(pad.vm_flags & VM_PAD_MASK);
if (smaps)
((show_pad_smaps_fn)func)(m, pad);
((show_pad_smaps_fn)func)(m, &pad);
else
((show_pad_maps_fn)func)(m, pad);
kfree(pad);
kfree(vma);
((show_pad_maps_fn)func)(m, &pad);
}
/*

View File

@@ -240,6 +240,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
.disable_policy = 0,
.rpl_seg_enabled = 0,
.ra_honor_pio_pflag = 0,
};
static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -297,6 +298,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
.addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
.disable_policy = 0,
.rpl_seg_enabled = 0,
.ra_honor_pio_pflag = 0,
};
/* Check if link is ready: is it up and is a valid qdisc available */
@@ -2734,6 +2736,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
u32 addr_flags = 0;
struct inet6_dev *in6_dev;
struct net *net = dev_net(dev);
bool ignore_autoconf = false;
pinfo = (struct prefix_info *) opt;
@@ -2827,7 +2830,8 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
/* Try to figure out our local address for this prefix */
if (pinfo->autoconf && in6_dev->cnf.autoconf) {
ignore_autoconf = READ_ONCE(in6_dev->cnf.ra_honor_pio_pflag) && pinfo->preferpd;
if (pinfo->autoconf && in6_dev->cnf.autoconf && !ignore_autoconf) {
struct in6_addr addr;
bool tokenized = false, dev_addr_generated = false;
@@ -6793,6 +6797,15 @@ static const struct ctl_table addrconf_sysctl[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "ra_honor_pio_pflag",
.data = &ipv6_devconf.ra_honor_pio_pflag,
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
#ifdef CONFIG_IPV6_ROUTER_PREF
{
.procname = "accept_ra_rtr_pref",