Snap for 10922982 from e938b62aff to android13-5.15-keystone-qcom-release

Change-Id: I2daa65ca2c28a61ef6b04dba197471981d5b4a9c
Author: Android Build Coastguard Worker
Date: 2023-10-10 00:00:24 +00:00
71 changed files with 2563 additions and 1804 deletions

View File

@@ -24,6 +24,7 @@ _aarch64_additional_kmi_symbol_lists = [
"android/abi_gki_aarch64_honor",
"android/abi_gki_aarch64_imx",
"android/abi_gki_aarch64_lenovo",
"android/abi_gki_aarch64_microsoft",
"android/abi_gki_aarch64_moto",
"android/abi_gki_aarch64_mtk",
"android/abi_gki_aarch64_mtktv",

View File

@@ -141,3 +141,6 @@
unregister_shrinker
wait_on_page_bit
__wake_up
# preserved by --additions-only
android_kmalloc_64_create

File diff suppressed because it is too large

View File

@@ -162,6 +162,7 @@
clk_register
clk_register_composite
clk_round_rate
clk_set_min_rate
clk_set_parent
clk_set_rate
clk_unprepare
@@ -469,6 +470,7 @@
dma_heap_get_dev
dma_heap_get_drvdata
dma_heap_get_name
dma_heap_put
d_make_root
dmam_alloc_attrs
dma_map_page_attrs
@@ -797,6 +799,7 @@
get_cpu_iowait_time_us
get_device
get_device_system_crosststamp
get_each_dmabuf
__get_free_pages
get_kernel_pages
get_net_ns_by_fd
@@ -1302,6 +1305,7 @@
of_prop_next_u32
of_pwm_xlate_with_flags
of_reserved_mem_device_init_by_idx
of_reserved_mem_device_init_by_name
of_reserved_mem_device_release
of_reserved_mem_lookup
of_reset_control_array_get
@@ -1327,6 +1331,7 @@
panic
panic_notifier_list
param_array_ops
param_get_bool
param_get_charp
param_get_hexint
param_get_int
@@ -1921,6 +1926,7 @@
__tasklet_schedule
tasklet_setup
tasklet_unlock_wait
tasklist_lock
task_may_not_preempt
thermal_cooling_device_unregister
thermal_of_cooling_device_register
@@ -1952,6 +1958,7 @@
__traceiter_android_rvh_schedule
__traceiter_android_rvh_select_task_rq_rt
__traceiter_android_rvh_tick_entry
__traceiter_android_vh_cma_alloc_bypass
__traceiter_android_vh_cma_drain_all_pages_bypass
__traceiter_android_vh_cpu_idle_enter
__traceiter_android_vh_cpu_idle_exit
@@ -1959,6 +1966,7 @@
__traceiter_android_vh_dump_throttled_rt_tasks
__traceiter_android_vh_ftrace_format_check
__traceiter_android_vh_iommu_iovad_free_iova
__traceiter_android_vh_ipi_stop
__traceiter_android_vh_mem_cgroup_alloc
__traceiter_android_vh_mmc_sd_update_cmdline_timing
__traceiter_android_vh_mmc_sd_update_dataline_timing
@@ -1986,6 +1994,7 @@
__tracepoint_android_rvh_schedule
__tracepoint_android_rvh_select_task_rq_rt
__tracepoint_android_rvh_tick_entry
__tracepoint_android_vh_cma_alloc_bypass
__tracepoint_android_vh_cma_drain_all_pages_bypass
__tracepoint_android_vh_cpu_idle_enter
__tracepoint_android_vh_cpu_idle_exit
@@ -1993,6 +2002,7 @@
__tracepoint_android_vh_dump_throttled_rt_tasks
__tracepoint_android_vh_ftrace_format_check
__tracepoint_android_vh_iommu_iovad_free_iova
__tracepoint_android_vh_ipi_stop
__tracepoint_android_vh_mem_cgroup_alloc
__tracepoint_android_vh_mmc_sd_update_cmdline_timing
__tracepoint_android_vh_mmc_sd_update_dataline_timing

View File

@@ -0,0 +1,10 @@
[abi_symbol_list]
# required by igb.ko
dev_trans_start
eth_get_headlen
flow_rule_match_eth_addrs
__hw_addr_sync_dev
__hw_addr_unsync_dev
ndo_dflt_fdb_add
pci_sriov_set_totalvfs
ptp_find_pin
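
Each of these names a kernel export that the vendor module links against; a hedged sketch of how referencing one export creates the requirement (the helper below is hypothetical, dev_trans_start() is the listed symbol):

#include <linux/jiffies.h>
#include <linux/netdevice.h>

/* Hypothetical fragment of an out-of-tree driver such as igb.ko: the
 * call to the exported dev_trans_start() is what pulls that symbol
 * into the KMI list above. */
static bool tx_stalled(struct net_device *dev)
{
	/* Did the last transmit happen more than a second ago? */
	return time_after(jiffies, dev_trans_start(dev) + HZ);
}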

View File

@@ -156,6 +156,7 @@
kern_unmount
kfree
kfree_skb
kick_process
kill_anon_super
kmalloc_caches
kmem_cache_alloc
@@ -230,12 +231,14 @@
nla_reserve
nonseekable_open
nr_cpu_ids
nr_running
__num_online_cpus
of_css
page_endio
__page_file_index
__page_mapcount
page_mapping
page_referenced
page_to_lruvec
param_ops_uint
__per_cpu_offset
@@ -286,6 +289,7 @@
register_sysctl_table
register_tcf_proto_ops
remove_proc_subtree
root_mem_cgroup
rtc_read_alarm
__rtnl_link_unregister
sched_clock
@@ -409,9 +413,12 @@
__traceiter_android_vh_dm_bufio_shrink_scan_bypass
__traceiter_android_vh_drain_all_pages_bypass
__traceiter_android_vh_dup_task_struct
__traceiter_android_vh_exit_check
__traceiter_android_vh_exit_mm
__traceiter_android_vh_exit_signal
__traceiter_android_vh_exit_signal_whether_wake
__traceiter_android_vh_free_task
__traceiter_android_vh_freeze_whether_wake
__traceiter_android_vh_futex_sleep_start
__traceiter_android_vh_futex_wait_end
__traceiter_android_vh_futex_wait_start
@@ -533,9 +540,12 @@
__tracepoint_android_vh_do_traversal_lruvec
__tracepoint_android_vh_drain_all_pages_bypass
__tracepoint_android_vh_dup_task_struct
__tracepoint_android_vh_exit_check
__tracepoint_android_vh_exit_mm
__tracepoint_android_vh_exit_signal
__tracepoint_android_vh_exit_signal_whether_wake
__tracepoint_android_vh_free_task
__tracepoint_android_vh_freeze_whether_wake
__tracepoint_android_vh_futex_sleep_start
__tracepoint_android_vh_futex_wait_end
__tracepoint_android_vh_futex_wait_start
@@ -629,6 +639,7 @@
wait_for_completion_io_timeout
__wake_up
wake_up_process
wake_up_state
wq_worker_comm
is_ashmem_file
zero_pfn

View File

@@ -1919,6 +1919,7 @@
__traceiter_android_vh_cpu_idle_enter
__traceiter_android_vh_cpu_idle_exit
__traceiter_android_vh_dup_task_struct
__traceiter_android_vh_enable_thermal_genl_check
__traceiter_android_vh_ipi_stop
__traceiter_android_vh_scheduler_tick
__traceiter_android_vh_setscheduler_uclamp
@@ -1975,6 +1976,7 @@
__tracepoint_android_vh_cpu_idle_enter
__tracepoint_android_vh_cpu_idle_exit
__tracepoint_android_vh_dup_task_struct
__tracepoint_android_vh_enable_thermal_genl_check
__tracepoint_android_vh_ipi_stop
__tracepoint_android_vh_scheduler_tick
__tracepoint_android_vh_setscheduler_uclamp

View File

@@ -103,6 +103,7 @@
blk_get_request
blk_ksm_get_slot_idx
blk_ksm_register
blk_ksm_reprogram_all_keys
blk_mark_disk_dead
blk_mq_alloc_request
blk_mq_alloc_request_hctx
@@ -1461,6 +1462,7 @@
page_endio
page_is_ram
page_mapping
page_owner_inited
page_pool_alloc_pages
page_pool_create
page_pool_destroy
@@ -1991,6 +1993,7 @@
set_next_entity
set_normalized_timespec64
__SetPageMovable
__set_page_owner
set_task_cpu
set_user_nice
sg_alloc_table

View File

@@ -3164,17 +3164,21 @@
__tracepoint_android_vh_show_mem
# required by unisoc_mm_reclaim.ko
__traceiter_android_vh_clear_page_migrating
__traceiter_android_vh_do_page_trylock
__traceiter_android_vh_handle_failed_page_trylock
__traceiter_android_vh_page_trylock_clear
__traceiter_android_vh_page_trylock_get_result
__traceiter_android_vh_page_trylock_set
__traceiter_android_vh_set_page_migrating
__traceiter_android_vh_shrink_slab_bypass
__tracepoint_android_vh_clear_page_migrating
__tracepoint_android_vh_do_page_trylock
__tracepoint_android_vh_handle_failed_page_trylock
__tracepoint_android_vh_page_trylock_clear
__tracepoint_android_vh_page_trylock_get_result
__tracepoint_android_vh_page_trylock_set
__tracepoint_android_vh_set_page_migrating
__tracepoint_android_vh_shrink_slab_bypass
# required by unisoc_mm_slab.ko
@@ -3254,6 +3258,9 @@
usb_store_new_id
usb_unpoison_urb
# required by sprd_time_sync_cp.ko
pvclock_gtod_register_notifier
# required by vha.ko
clk_bulk_get
device_wakeup_disable

View File

@@ -1147,6 +1147,11 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
}
}
#endif
#define __HAVE_ARCH_PMDP_INVALIDATE_AD
extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);
/*
* Page table pages are page-aligned. The lower half of the top
* level is used for userspace and the top half for the kernel.

View File

@@ -110,9 +110,11 @@
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
#define _PAGE_DEVMAP (_AT(u64, 1) << _PAGE_BIT_DEVMAP)
#define _PAGE_SOFTW4 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW4)
#else
#define _PAGE_NX (_AT(pteval_t, 0))
#define _PAGE_DEVMAP (_AT(pteval_t, 0))
#define _PAGE_SOFTW4 (_AT(pteval_t, 0))
#endif
#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)

View File

@@ -259,6 +259,103 @@ static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
static inline bool pte_flags_need_flush(unsigned long oldflags,
unsigned long newflags,
bool ignore_access)
{
/*
* Flags that require a flush when cleared but not when they are set.
* Only include flags that would not trigger spurious page-faults.
* Non-present entries are not cached. Hardware would set the
* dirty/access bit if needed without a fault.
*/
const pteval_t flush_on_clear = _PAGE_DIRTY | _PAGE_PRESENT |
_PAGE_ACCESSED;
const pteval_t software_flags = _PAGE_SOFTW1 | _PAGE_SOFTW2 |
_PAGE_SOFTW3 | _PAGE_SOFTW4;
const pteval_t flush_on_change = _PAGE_RW | _PAGE_USER | _PAGE_PWT |
_PAGE_PCD | _PAGE_PSE | _PAGE_GLOBAL | _PAGE_PAT |
_PAGE_PAT_LARGE | _PAGE_PKEY_BIT0 | _PAGE_PKEY_BIT1 |
_PAGE_PKEY_BIT2 | _PAGE_PKEY_BIT3 | _PAGE_NX;
unsigned long diff = oldflags ^ newflags;
BUILD_BUG_ON(flush_on_clear & software_flags);
BUILD_BUG_ON(flush_on_clear & flush_on_change);
BUILD_BUG_ON(flush_on_change & software_flags);
/* Ignore software flags */
diff &= ~software_flags;
if (ignore_access)
diff &= ~_PAGE_ACCESSED;
/*
* Were any of the 'flush_on_clear' flags that are set in 'oldflags'
* cleared in 'newflags'?
*/
if (diff & oldflags & flush_on_clear)
return true;
/* Flush on modified flags. */
if (diff & flush_on_change)
return true;
/* Ensure there are no flags that were left behind */
if (IS_ENABLED(CONFIG_DEBUG_VM) &&
(diff & ~(flush_on_clear | software_flags | flush_on_change))) {
VM_WARN_ON_ONCE(1);
return true;
}
return false;
}
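
A minimal sketch of how the three flag classes drive the decision (the helper is hypothetical; x86 kernel context with these page-flag constants is assumed):

/* Hypothetical illustration of pte_flags_need_flush() decisions. */
static inline bool demo_flush_decisions(void)
{
	const pteval_t oldf = _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY;
	bool a, b, c;

	/* Clearing _PAGE_DIRTY, a flush_on_clear bit: flush needed. */
	a = pte_flags_need_flush(oldf, oldf & ~_PAGE_DIRTY, false);

	/* Toggling _PAGE_SOFTW1, a software bit: ignored, no flush. */
	b = pte_flags_need_flush(oldf, oldf ^ _PAGE_SOFTW1, false);

	/* Clearing _PAGE_RW, a flush_on_change bit: flush needed. */
	c = pte_flags_need_flush(oldf, oldf & ~_PAGE_RW, false);

	return a && !b && c;	/* expected: true, false, true */
}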
/*
* pte_needs_flush() checks whether permissions were demoted and require a
* flush. It should only be used for userspace PTEs.
*/
static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
/* !PRESENT -> * ; no need for flush */
if (!(pte_flags(oldpte) & _PAGE_PRESENT))
return false;
/* PFN changed ; needs flush */
if (pte_pfn(oldpte) != pte_pfn(newpte))
return true;
/*
* check PTE flags; ignore access-bit; see comment in
* ptep_clear_flush_young().
*/
return pte_flags_need_flush(pte_flags(oldpte), pte_flags(newpte),
true);
}
#define pte_needs_flush pte_needs_flush
/*
* huge_pmd_needs_flush() checks whether permissions were demoted and require a
* flush. It should only be used for userspace huge PMDs.
*/
static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
/* !PRESENT -> * ; no need for flush */
if (!(pmd_flags(oldpmd) & _PAGE_PRESENT))
return false;
/* PFN changed ; needs flush */
if (pmd_pfn(oldpmd) != pmd_pfn(newpmd))
return true;
/*
* check PMD flags; do not ignore access-bit; see
* pmdp_clear_flush_young().
*/
return pte_flags_need_flush(pmd_flags(oldpmd), pmd_flags(newpmd),
false);
}
#define huge_pmd_needs_flush huge_pmd_needs_flush
#endif /* !MODULE */
#endif /* _ASM_X86_TLBFLUSH_H */

View File

@@ -611,6 +611,16 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
return young;
}
pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
/*
* No flush is necessary. Once an invalid PTE is established, the PTE's
* access and dirty bits cannot be updated.
*/
return pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
}
#endif
/**

View File

@@ -31,6 +31,11 @@
*/
static const int read_expire = HZ / 2; /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
/*
* Time after which to dispatch lower priority requests even if higher
* priority requests are pending.
*/
static const int prio_aging_expire = 10 * HZ;
static const int writes_starved = 2; /* max times reads can starve a write */
static const int fifo_batch = 16; /* # of sequential requests treated as one
by the above parameters. For throughput. */
@@ -51,17 +56,16 @@ enum dd_prio {
enum { DD_PRIO_COUNT = 3 };
/* I/O statistics per I/O priority. */
/*
* I/O statistics per I/O priority. It is fine if these counters overflow.
* What matters is that these counters are at least as wide as
* log2(max_outstanding_requests).
*/
struct io_stats_per_prio {
local_t inserted;
local_t merged;
local_t dispatched;
local_t completed;
};
/* I/O statistics for all I/O priorities (enum dd_prio). */
struct io_stats {
struct io_stats_per_prio stats[DD_PRIO_COUNT];
uint32_t inserted;
uint32_t merged;
uint32_t dispatched;
atomic_t completed;
};
/*
@@ -74,6 +78,7 @@ struct dd_per_prio {
struct list_head fifo_list[DD_DIR_COUNT];
/* Next request in FIFO order. Read, write or both are NULL. */
struct request *next_rq[DD_DIR_COUNT];
struct io_stats_per_prio stats;
};
struct deadline_data {
@@ -88,8 +93,6 @@ struct deadline_data {
unsigned int batching; /* number of sequential requests made */
unsigned int starved; /* times reads have starved writes */
struct io_stats __percpu *stats;
/*
* settings that change how the i/o scheduler behaves
*/
@@ -98,38 +101,12 @@ struct deadline_data {
int writes_starved;
int front_merges;
u32 async_depth;
int prio_aging_expire;
spinlock_t lock;
spinlock_t zone_lock;
};
/* Count one event of type 'event_type' and with I/O priority 'prio' */
#define dd_count(dd, event_type, prio) do { \
struct io_stats *io_stats = get_cpu_ptr((dd)->stats); \
\
BUILD_BUG_ON(!__same_type((dd), struct deadline_data *)); \
BUILD_BUG_ON(!__same_type((prio), enum dd_prio)); \
local_inc(&io_stats->stats[(prio)].event_type); \
put_cpu_ptr(io_stats); \
} while (0)
/*
* Returns the total number of dd_count(dd, event_type, prio) calls across all
* CPUs. No locking or barriers since it is fine if the returned sum is slightly
* outdated.
*/
#define dd_sum(dd, event_type, prio) ({ \
unsigned int cpu; \
u32 sum = 0; \
\
BUILD_BUG_ON(!__same_type((dd), struct deadline_data *)); \
BUILD_BUG_ON(!__same_type((prio), enum dd_prio)); \
for_each_present_cpu(cpu) \
sum += local_read(&per_cpu_ptr((dd)->stats, cpu)-> \
stats[(prio)].event_type); \
sum; \
})
/* Maps an I/O priority class to a deadline scheduler priority. */
static const enum dd_prio ioprio_class_to_prio[] = {
[IOPRIO_CLASS_NONE] = DD_BE_PRIO,
@@ -247,7 +224,9 @@ static void dd_merged_requests(struct request_queue *q, struct request *req,
const u8 ioprio_class = dd_rq_ioclass(next);
const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
dd_count(dd, merged, prio);
lockdep_assert_held(&dd->lock);
dd->per_prio[prio].stats.merged++;
/*
* if next expires before rq, assign its expire time to rq
@@ -284,6 +263,16 @@ deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
deadline_remove_request(rq->q, per_prio, rq);
}
/* Number of requests queued for a given priority level. */
static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
{
const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;
lockdep_assert_held(&dd->lock);
return stats->inserted - atomic_read(&stats->completed);
}
/*
* deadline_check_fifo returns 0 if there are no expired requests on the fifo,
* 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
@@ -413,12 +402,27 @@ deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
return rq;
}
/*
* Returns true if and only if @rq started after @latest_start where
* @latest_start is in jiffies.
*/
static bool started_after(struct deadline_data *dd, struct request *rq,
unsigned long latest_start)
{
unsigned long start_time = (unsigned long)rq->fifo_time;
start_time -= dd->fifo_expire[rq_data_dir(rq)];
return time_after(start_time, latest_start);
}
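
Note that for FIFO-queued requests rq->fifo_time holds the expiry deadline (insertion time plus fifo_expire), which is why started_after() subtracts fifo_expire before comparing; a worked example with an assumed HZ:

/*
 * Worked example (assuming HZ == 250): a read inserted at jiffies
 * 1000 with fifo_expire[DD_READ] == HZ / 2 == 125 is stored with
 * rq->fifo_time == 1125. started_after() recovers the insertion
 * time as 1125 - 125 == 1000 and reports true iff 1000 is after
 * @latest_start.
 */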
/*
* deadline_dispatch_requests selects the best request according to
* read/write expire, fifo_batch, etc
* read/write expire, fifo_batch, etc and with a start time <= @latest_start.
*/
static struct request *__dd_dispatch_request(struct deadline_data *dd,
struct dd_per_prio *per_prio)
struct dd_per_prio *per_prio,
unsigned long latest_start)
{
struct request *rq, *next_rq;
enum dd_data_dir data_dir;
@@ -430,6 +434,8 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd,
if (!list_empty(&per_prio->dispatch)) {
rq = list_first_entry(&per_prio->dispatch, struct request,
queuelist);
if (started_after(dd, rq, latest_start))
return NULL;
list_del_init(&rq->queuelist);
goto done;
}
@@ -507,6 +513,9 @@ dispatch_find_request:
dd->batching = 0;
dispatch_request:
if (started_after(dd, rq, latest_start))
return NULL;
/*
* rq is the selected appropriate request.
*/
@@ -515,7 +524,7 @@ dispatch_request:
done:
ioprio_class = dd_rq_ioclass(rq);
prio = ioprio_class_to_prio[ioprio_class];
dd_count(dd, dispatched, prio);
dd->per_prio[prio].stats.dispatched++;
/*
* If the request needs its target zone locked, do it.
*/
@@ -524,6 +533,34 @@ done:
return rq;
}
/*
* Check whether there are any requests with priority other than DD_RT_PRIO
* that were inserted more than prio_aging_expire jiffies ago.
*/
static struct request *dd_dispatch_prio_aged_requests(struct deadline_data *dd,
unsigned long now)
{
struct request *rq;
enum dd_prio prio;
int prio_cnt;
lockdep_assert_held(&dd->lock);
prio_cnt = !!dd_queued(dd, DD_RT_PRIO) + !!dd_queued(dd, DD_BE_PRIO) +
!!dd_queued(dd, DD_IDLE_PRIO);
if (prio_cnt < 2)
return NULL;
for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) {
rq = __dd_dispatch_request(dd, &dd->per_prio[prio],
now - dd->prio_aging_expire);
if (rq)
return rq;
}
return NULL;
}
/*
* Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
*
@@ -535,15 +572,26 @@ done:
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
struct deadline_data *dd = hctx->queue->elevator->elevator_data;
const unsigned long now = jiffies;
struct request *rq;
enum dd_prio prio;
spin_lock(&dd->lock);
rq = dd_dispatch_prio_aged_requests(dd, now);
if (rq)
goto unlock;
/*
* Next, dispatch requests in priority order. Ignore lower priority
* requests if any higher priority requests are pending.
*/
for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
rq = __dd_dispatch_request(dd, &dd->per_prio[prio]);
if (rq)
rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now);
if (rq || dd_queued(dd, prio))
break;
}
unlock:
spin_unlock(&dd->lock);
return rq;
@@ -574,8 +622,9 @@ static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
struct request_queue *q = hctx->queue;
struct deadline_data *dd = q->elevator->elevator_data;
struct blk_mq_tags *tags = hctx->sched_tags;
unsigned int shift = tags->bitmap_tags->sb.shift;
dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
dd->async_depth = max(1U, 3 * (1U << shift) / 4);
sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth);
}
@@ -594,12 +643,21 @@ static void dd_exit_sched(struct elevator_queue *e)
for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
struct dd_per_prio *per_prio = &dd->per_prio[prio];
const struct io_stats_per_prio *stats = &per_prio->stats;
uint32_t queued;
WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
}
free_percpu(dd->stats);
spin_lock(&dd->lock);
queued = dd_queued(dd, prio);
spin_unlock(&dd->lock);
WARN_ONCE(queued != 0,
"statistics for priority %d: i %u m %u d %u c %u\n",
prio, stats->inserted, stats->merged,
stats->dispatched, atomic_read(&stats->completed));
}
kfree(dd);
}
@@ -624,11 +682,6 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
eq->elevator_data = dd;
dd->stats = alloc_percpu_gfp(typeof(*dd->stats),
GFP_KERNEL | __GFP_ZERO);
if (!dd->stats)
goto free_dd;
for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
struct dd_per_prio *per_prio = &dd->per_prio[prio];
@@ -644,15 +697,13 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
dd->front_merges = 1;
dd->last_dir = DD_WRITE;
dd->fifo_batch = fifo_batch;
dd->prio_aging_expire = prio_aging_expire;
spin_lock_init(&dd->lock);
spin_lock_init(&dd->zone_lock);
q->elevator = eq;
return 0;
free_dd:
kfree(dd);
put_eq:
kobject_put(&eq->kobj);
return ret;
@@ -735,8 +786,11 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
blk_req_zone_write_unlock(rq);
prio = ioprio_class_to_prio[ioprio_class];
dd_count(dd, inserted, prio);
rq->elv.priv[0] = (void *)(uintptr_t)1;
per_prio = &dd->per_prio[prio];
if (!rq->elv.priv[0]) {
per_prio->stats.inserted++;
rq->elv.priv[0] = (void *)(uintptr_t)1;
}
if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
blk_mq_free_requests(&free);
@@ -745,9 +799,9 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
trace_block_rq_insert(rq);
per_prio = &dd->per_prio[prio];
if (at_head) {
list_add(&rq->queuelist, &per_prio->dispatch);
rq->fifo_time = jiffies;
} else {
deadline_add_rq_rb(per_prio, rq);
@@ -825,15 +879,17 @@ static void dd_finish_request(struct request *rq)
struct deadline_data *dd = q->elevator->elevator_data;
const u8 ioprio_class = dd_rq_ioclass(rq);
const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
struct dd_per_prio *per_prio = &dd->per_prio[prio];
/*
* The block layer core may call dd_finish_request() without having
* called dd_insert_requests(). Hence only update statistics for
* requests for which dd_insert_requests() has been called. See also
* blk_mq_request_bypass_insert().
* called dd_insert_requests(). Skip requests that bypassed I/O
* scheduling. See also blk_mq_request_bypass_insert().
*/
if (rq->elv.priv[0])
dd_count(dd, completed, prio);
if (!rq->elv.priv[0])
return;
atomic_inc(&per_prio->stats.completed);
if (blk_queue_is_zoned(q)) {
unsigned long flags;
@@ -879,6 +935,7 @@ static ssize_t __FUNC(struct elevator_queue *e, char *page) \
#define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
SHOW_JIFFIES(deadline_prio_aging_expire_show, dd->prio_aging_expire);
SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
SHOW_INT(deadline_front_merges_show, dd->front_merges);
SHOW_INT(deadline_async_depth_show, dd->async_depth);
@@ -908,6 +965,7 @@ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)
STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
STORE_JIFFIES(deadline_prio_aging_expire_store, &dd->prio_aging_expire, 0, INT_MAX);
STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
@@ -926,6 +984,7 @@ static struct elv_fs_entry deadline_attrs[] = {
DD_ATTR(front_merges),
DD_ATTR(async_depth),
DD_ATTR(fifo_batch),
DD_ATTR(prio_aging_expire),
__ATTR_NULL
};
@@ -1017,38 +1076,48 @@ static int dd_async_depth_show(void *data, struct seq_file *m)
return 0;
}
/* Number of requests queued for a given priority level. */
static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
{
return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
}
static int dd_queued_show(void *data, struct seq_file *m)
{
struct request_queue *q = data;
struct deadline_data *dd = q->elevator->elevator_data;
u32 rt, be, idle;
spin_lock(&dd->lock);
rt = dd_queued(dd, DD_RT_PRIO);
be = dd_queued(dd, DD_BE_PRIO);
idle = dd_queued(dd, DD_IDLE_PRIO);
spin_unlock(&dd->lock);
seq_printf(m, "%u %u %u\n", rt, be, idle);
seq_printf(m, "%u %u %u\n", dd_queued(dd, DD_RT_PRIO),
dd_queued(dd, DD_BE_PRIO),
dd_queued(dd, DD_IDLE_PRIO));
return 0;
}
/* Number of requests owned by the block driver for a given priority. */
static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
{
return dd_sum(dd, dispatched, prio) + dd_sum(dd, merged, prio)
- dd_sum(dd, completed, prio);
const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;
lockdep_assert_held(&dd->lock);
return stats->dispatched + stats->merged -
atomic_read(&stats->completed);
}
static int dd_owned_by_driver_show(void *data, struct seq_file *m)
{
struct request_queue *q = data;
struct deadline_data *dd = q->elevator->elevator_data;
u32 rt, be, idle;
spin_lock(&dd->lock);
rt = dd_owned_by_driver(dd, DD_RT_PRIO);
be = dd_owned_by_driver(dd, DD_BE_PRIO);
idle = dd_owned_by_driver(dd, DD_IDLE_PRIO);
spin_unlock(&dd->lock);
seq_printf(m, "%u %u %u\n", rt, be, idle);
seq_printf(m, "%u %u %u\n", dd_owned_by_driver(dd, DD_RT_PRIO),
dd_owned_by_driver(dd, DD_BE_PRIO),
dd_owned_by_driver(dd, DD_IDLE_PRIO));
return 0;
}

View File

@@ -25,6 +25,7 @@ android/abi_gki_aarch64_general
android/abi_gki_aarch64_honor
android/abi_gki_aarch64_imx
android/abi_gki_aarch64_pixel
android/abi_gki_aarch64_microsoft
android/abi_gki_aarch64_moto
android/abi_gki_aarch64_mtktv
android/abi_gki_aarch64_mtk
@@ -50,6 +51,7 @@ arch/arm64/boot/Image.gz
# Update BUILD.bazel, define_common_kernels() if the value is not 1.
TRIM_NONLISTED_KMI=${TRIM_NONLISTED_KMI:-1}
KMI_SYMBOL_LIST_ADD_ONLY=1
KMI_SYMBOL_LIST_STRICT_MODE=${KMI_SYMBOL_LIST_STRICT_MODE:-1}
KMI_ENFORCED=1

View File

@@ -453,8 +453,15 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_check_page_look_around_ref);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_look_around);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_look_around_migrate_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_test_clear_look_around_ref);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_page_migrating);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_clear_page_migrating);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cma_alloc_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_enable_thermal_genl_check);
/*
* For type visibility
*/
const struct readahead_control *GKI_struct_readahead_control;
EXPORT_SYMBOL_GPL(GKI_struct_readahead_control);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exit_signal_whether_wake);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exit_check);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_freeze_whether_wake);

View File

@@ -611,6 +611,7 @@ static void dm_bow_dtr(struct dm_target *ti)
wait_for_completion(dm_get_completion_from_kobject(kobj));
}
mutex_lock(&bc->ranges_lock);
while (rb_first(&bc->ranges)) {
struct bow_range *br = container_of(rb_first(&bc->ranges),
struct bow_range, node);
@@ -618,6 +619,7 @@ static void dm_bow_dtr(struct dm_target *ti)
rb_erase(&br->node, &bc->ranges);
kfree(br);
}
mutex_unlock(&bc->ranges_lock);
mutex_destroy(&bc->ranges_lock);
kfree(bc->log_sector);
@@ -1191,6 +1193,7 @@ static void dm_bow_tablestatus(struct dm_target *ti, char *result,
return;
}
mutex_lock(&bc->ranges_lock);
for (i = rb_first(&bc->ranges); i; i = rb_next(i)) {
struct bow_range *br = container_of(i, struct bow_range, node);
@@ -1198,11 +1201,11 @@ static void dm_bow_tablestatus(struct dm_target *ti, char *result,
readable_type[br->type],
(unsigned long long)br->sector);
if (result >= end)
return;
goto unlock;
result += scnprintf(result, end - result, "\n");
if (result >= end)
return;
goto unlock;
if (br->type == TRIMMED)
++trimmed_range_count;
@@ -1224,19 +1227,22 @@ static void dm_bow_tablestatus(struct dm_target *ti, char *result,
if (!rb_next(i)) {
scnprintf(result, end - result,
"\nERROR: Last range not of type TOP");
return;
goto unlock;
}
if (br->sector > range_top(br)) {
scnprintf(result, end - result,
"\nERROR: sectors out of order");
return;
goto unlock;
}
}
if (trimmed_range_count != trimmed_list_length)
scnprintf(result, end - result,
"\nERROR: not all trimmed ranges in trimmed list");
unlock:
mutex_unlock(&bc->ranges_lock);
}
static void dm_bow_status(struct dm_target *ti, status_type_t type,

View File

@@ -179,7 +179,8 @@ static void smsusb_stop_streaming(struct smsusb_device_t *dev)
for (i = 0; i < MAX_URBS; i++) {
usb_kill_urb(&dev->surbs[i].urb);
cancel_work_sync(&dev->surbs[i].wq);
if (dev->surbs[i].wq.func)
cancel_work_sync(&dev->surbs[i].wq);
if (dev->surbs[i].cb) {
smscore_putbuffer(dev->coredev, dev->surbs[i].cb);

View File

@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
@@ -629,7 +630,6 @@ static const struct proc_ops uid_procstat_fops = {
};
struct update_stats_work {
struct work_struct work;
uid_t uid;
#ifdef CONFIG_UID_SYS_STATS_DEBUG
struct task_struct *task;
@@ -637,38 +637,46 @@ struct update_stats_work {
struct task_io_accounting ioac;
u64 utime;
u64 stime;
struct llist_node node;
};
static LLIST_HEAD(work_usw);
static void update_stats_workfn(struct work_struct *work)
{
struct update_stats_work *usw =
container_of(work, struct update_stats_work, work);
struct update_stats_work *usw, *t;
struct uid_entry *uid_entry;
struct task_entry *task_entry __maybe_unused;
struct llist_node *node;
rt_mutex_lock(&uid_lock);
uid_entry = find_uid_entry(usw->uid);
if (!uid_entry)
goto exit;
uid_entry->utime += usw->utime;
uid_entry->stime += usw->stime;
node = llist_del_all(&work_usw);
llist_for_each_entry_safe(usw, t, node, node) {
uid_entry = find_uid_entry(usw->uid);
if (!uid_entry)
goto next;
uid_entry->utime += usw->utime;
uid_entry->stime += usw->stime;
#ifdef CONFIG_UID_SYS_STATS_DEBUG
task_entry = find_task_entry(uid_entry, usw->task);
if (!task_entry)
goto exit;
add_uid_tasks_io_stats(task_entry, &usw->ioac,
UID_STATE_DEAD_TASKS);
task_entry = find_task_entry(uid_entry, usw->task);
if (!task_entry)
goto next;
add_uid_tasks_io_stats(task_entry, &usw->ioac,
UID_STATE_DEAD_TASKS);
#endif
__add_uid_io_stats(uid_entry, &usw->ioac, UID_STATE_DEAD_TASKS);
exit:
__add_uid_io_stats(uid_entry, &usw->ioac, UID_STATE_DEAD_TASKS);
next:
#ifdef CONFIG_UID_SYS_STATS_DEBUG
put_task_struct(usw->task);
#endif
kfree(usw);
}
rt_mutex_unlock(&uid_lock);
#ifdef CONFIG_UID_SYS_STATS_DEBUG
put_task_struct(usw->task);
#endif
kfree(usw);
}
static DECLARE_WORK(update_stats_work, update_stats_workfn);
static int process_notifier(struct notifier_block *self,
unsigned long cmd, void *v)
@@ -687,7 +695,6 @@ static int process_notifier(struct notifier_block *self,
usw = kmalloc(sizeof(struct update_stats_work), GFP_KERNEL);
if (usw) {
INIT_WORK(&usw->work, update_stats_workfn);
usw->uid = uid;
#ifdef CONFIG_UID_SYS_STATS_DEBUG
usw->task = get_task_struct(task);
@@ -698,7 +705,8 @@ static int process_notifier(struct notifier_block *self,
*/
usw->ioac = task->ioac;
task_cputime_adjusted(task, &usw->utime, &usw->stime);
schedule_work(&usw->work);
llist_add(&usw->node, &work_usw);
schedule_work(&update_stats_work);
}
return NOTIFY_OK;
}
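
The refactor above replaces one work_struct per dead task with a single shared work item that drains a lock-free llist. Since the diff markers were stripped, here is a distilled sketch of that producer/consumer pattern (all names are illustrative, not the driver's):

#include <linux/llist.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct item {
	struct llist_node node;
	u64 payload;
};

static LLIST_HEAD(pending);	/* lock-free, multi-producer list */

static void drain_workfn(struct work_struct *work)
{
	struct llist_node *head = llist_del_all(&pending);
	struct item *it, *tmp;

	/* Consume the whole batch in one work invocation. */
	llist_for_each_entry_safe(it, tmp, head, node) {
		/* ... process it->payload ... */
		kfree(it);
	}
}
static DECLARE_WORK(drain_work, drain_workfn);

/* Producer: safe to call from many contexts concurrently. */
static void queue_item(u64 payload)
{
	struct item *it = kmalloc(sizeof(*it), GFP_KERNEL);

	if (!it)
		return;
	it->payload = payload;
	llist_add(&it->node, &pending);
	schedule_work(&drain_work);	/* no-op while already queued */
}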

View File

@@ -523,7 +523,7 @@ static int tap_open(struct inode *inode, struct file *file)
q->sock.state = SS_CONNECTED;
q->sock.file = file;
q->sock.ops = &tap_socket_ops;
sock_init_data_uid(&q->sock, &q->sk, inode->i_uid);
sock_init_data_uid(&q->sock, &q->sk, current_fsuid());
q->sk.sk_write_space = tap_sock_write_space;
q->sk.sk_destruct = tap_sock_destruct;
q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;

View File

@@ -3411,7 +3411,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
tfile->socket.file = file;
tfile->socket.ops = &tun_socket_ops;
sock_init_data_uid(&tfile->socket, &tfile->sk, inode->i_uid);
sock_init_data_uid(&tfile->socket, &tfile->sk, current_fsuid());
tfile->sk.sk_write_space = tun_sock_write_space;
tfile->sk.sk_sndbuf = INT_MAX;

View File

@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <net/genetlink.h>
#include <trace/hooks/thermal.h>
#include <uapi/linux/thermal.h>
#include "thermal_core.h"
@@ -229,6 +230,11 @@ static int thermal_genl_send_event(enum thermal_genl_event event,
struct sk_buff *msg;
int ret = -EMSGSIZE;
void *hdr;
int enable_thermal_genl = 1;
trace_android_vh_enable_thermal_genl_check(event, p->tz_id, &enable_thermal_genl);
if (!enable_thermal_genl)
return 0;
msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (!msg)

View File

@@ -679,8 +679,11 @@ fail:
pr_err("acc_bind() could not allocate requests\n");
while ((req = req_get(dev, &dev->tx_idle)))
acc_request_free(req, dev->ep_in);
for (i = 0; i < RX_REQ_MAX; i++)
for (i = 0; i < RX_REQ_MAX; i++) {
acc_request_free(dev->rx_req[i], dev->ep_out);
dev->rx_req[i] = NULL;
}
return -1;
}
@@ -712,6 +715,12 @@ static ssize_t acc_read(struct file *fp, char __user *buf,
goto done;
}
if (!dev->rx_req[0]) {
pr_warn("acc_read: USB request already handled/freed");
r = -EINVAL;
goto done;
}
/*
* Calculate the data length by considering the termination character.
* Then compensate for the difference of rounding up to
@@ -1208,8 +1217,10 @@ acc_function_unbind(struct usb_configuration *c, struct usb_function *f)
while ((req = req_get(dev, &dev->tx_idle)))
acc_request_free(req, dev->ep_in);
for (i = 0; i < RX_REQ_MAX; i++)
for (i = 0; i < RX_REQ_MAX; i++) {
acc_request_free(dev->rx_req[i], dev->ep_out);
dev->rx_req[i] = NULL;
}
acc_hid_unbind(dev);
}

View File

@@ -761,6 +761,7 @@ int setup_arg_pages(struct linux_binprm *bprm,
unsigned long stack_size;
unsigned long stack_expand;
unsigned long rlim_stack;
struct mmu_gather tlb;
#ifdef CONFIG_STACK_GROWSUP
/* Limit stack size */
@@ -815,8 +816,11 @@ int setup_arg_pages(struct linux_binprm *bprm,
vm_flags |= mm->def_flags;
vm_flags |= VM_STACK_INCOMPLETE_SETUP;
ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
tlb_gather_mmu(&tlb, mm);
ret = mprotect_fixup(&tlb, vma, &prev, vma->vm_start, vma->vm_end,
vm_flags);
tlb_finish_mmu(&tlb);
if (ret)
goto out_unlock;
BUG_ON(prev != vma);

View File

@@ -34,6 +34,7 @@ static void exfat_get_uniname_from_ext_entry(struct super_block *sb,
{
int i;
struct exfat_entry_set_cache *es;
unsigned int uni_len = 0, len;
es = exfat_get_dentry_set(sb, p_dir, entry, ES_ALL_ENTRIES);
if (!es)
@@ -52,7 +53,10 @@ static void exfat_get_uniname_from_ext_entry(struct super_block *sb,
if (exfat_get_entry_type(ep) != TYPE_EXTEND)
break;
exfat_extract_uni_name(ep, uniname);
len = exfat_extract_uni_name(ep, uniname);
uni_len += len;
if (len != EXFAT_FILE_NAME_LEN || uni_len >= MAX_NAME_LENGTH)
break;
uniname += EXFAT_FILE_NAME_LEN;
}
@@ -1035,7 +1039,8 @@ rewind:
if (entry_type == TYPE_EXTEND) {
unsigned short entry_uniname[16], unichar;
if (step != DIRENT_STEP_NAME) {
if (step != DIRENT_STEP_NAME ||
name_len >= MAX_NAME_LENGTH) {
step = DIRENT_STEP_FILE;
continue;
}

View File

@@ -1446,32 +1446,34 @@ int fuse_mkdir_initialize(
int fuse_mkdir_backing(
struct fuse_bpf_args *fa,
struct inode *dir, struct dentry *entry, umode_t mode)
struct inode *dir_inode, struct dentry *entry, umode_t mode)
{
int err = 0;
const struct fuse_mkdir_in *fmi = fa->in_args[0].value;
struct fuse_inode *fuse_inode = get_fuse_inode(dir);
struct inode *backing_inode = fuse_inode->backing_inode;
struct fuse_inode *dir_fuse_inode = get_fuse_inode(dir_inode);
struct inode *dir_backing_inode = dir_fuse_inode->backing_inode;
struct path backing_path = {};
struct inode *inode = NULL;
struct dentry *d;
//TODO Actually deal with changing the backing entry in mkdir
get_fuse_backing_path(entry, &backing_path);
if (!backing_path.dentry)
return -EBADF;
inode_lock_nested(backing_inode, I_MUTEX_PARENT);
inode_lock_nested(dir_backing_inode, I_MUTEX_PARENT);
mode = fmi->mode;
if (!IS_POSIXACL(backing_inode))
if (!IS_POSIXACL(dir_backing_inode))
mode &= ~fmi->umask;
err = vfs_mkdir(&init_user_ns, backing_inode, backing_path.dentry, mode);
err = vfs_mkdir(&init_user_ns, dir_backing_inode, backing_path.dentry,
mode);
if (err)
goto out;
if (d_really_is_negative(backing_path.dentry) ||
unlikely(d_unhashed(backing_path.dentry))) {
d = lookup_one_len(entry->d_name.name, backing_path.dentry->d_parent,
entry->d_name.len);
struct dentry *d = lookup_one_len(entry->d_name.name,
backing_path.dentry->d_parent,
entry->d_name.len);
if (IS_ERR(d)) {
err = PTR_ERR(d);
goto out;
@@ -1479,14 +1481,19 @@ int fuse_mkdir_backing(
dput(backing_path.dentry);
backing_path.dentry = d;
}
inode = fuse_iget_backing(dir->i_sb, fuse_inode->nodeid, backing_inode);
inode = fuse_iget_backing(dir_inode->i_sb, 0,
backing_path.dentry->d_inode);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out;
}
d_instantiate(entry, inode);
if (get_fuse_inode(inode)->bpf)
bpf_prog_put(get_fuse_inode(inode)->bpf);
get_fuse_inode(inode)->bpf = get_fuse_dentry(entry)->bpf;
get_fuse_dentry(entry)->bpf = NULL;
out:
inode_unlock(backing_inode);
inode_unlock(dir_backing_inode);
path_put(&backing_path);
return err;
}

View File

@@ -280,10 +280,12 @@ int ovl_permission(struct user_namespace *mnt_userns,
{
struct inode *upperinode = ovl_inode_upper(inode);
struct inode *realinode = upperinode ?: ovl_inode_lower(inode);
struct path realpath;
const struct cred *old_cred;
int err;
/* Careful in RCU walk mode */
realinode = ovl_i_path_real(inode, &realpath);
if (!realinode) {
WARN_ON(!(mask & MAY_NOT_BLOCK));
return -ECHILD;
@@ -449,11 +451,22 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
struct posix_acl *ovl_get_acl(struct inode *inode, int type, bool rcu)
{
struct inode *realinode = ovl_inode_real(inode);
struct inode *realinode;
const struct cred *old_cred;
struct posix_acl *acl;
struct path realpath;
if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !IS_POSIXACL(realinode))
if (!IS_ENABLED(CONFIG_FS_POSIX_ACL))
return NULL;
/* Careful in RCU walk mode */
realinode = ovl_i_path_real(inode, &realpath);
if (!realinode) {
WARN_ON(!rcu);
return ERR_PTR(-ECHILD);
}
if (!IS_POSIXACL(realinode))
return NULL;
if (rcu)

View File

@@ -294,7 +294,7 @@ enum ovl_path_type ovl_path_type(struct dentry *dentry);
void ovl_path_upper(struct dentry *dentry, struct path *path);
void ovl_path_lower(struct dentry *dentry, struct path *path);
void ovl_path_lowerdata(struct dentry *dentry, struct path *path);
void ovl_i_path_real(struct inode *inode, struct path *path);
struct inode *ovl_i_path_real(struct inode *inode, struct path *path);
enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path);
struct dentry *ovl_dentry_upper(struct dentry *dentry);
struct dentry *ovl_dentry_lower(struct dentry *dentry);

View File

@@ -245,7 +245,7 @@ struct dentry *ovl_i_dentry_upper(struct inode *inode)
return ovl_upperdentry_dereference(OVL_I(inode));
}
void ovl_i_path_real(struct inode *inode, struct path *path)
struct inode *ovl_i_path_real(struct inode *inode, struct path *path)
{
path->dentry = ovl_i_dentry_upper(inode);
if (!path->dentry) {
@@ -254,6 +254,8 @@ void ovl_i_path_real(struct inode *inode, struct path *path)
} else {
path->mnt = ovl_upper_mnt(OVL_FS(inode->i_sb));
}
return path->dentry ? d_inode(path->dentry) : NULL;
}
struct inode *ovl_inode_upper(struct inode *inode)
@@ -1100,8 +1102,7 @@ void ovl_copyattr(struct inode *inode)
struct inode *realinode;
struct user_namespace *real_mnt_userns;
ovl_i_path_real(inode, &realpath);
realinode = d_inode(realpath.dentry);
realinode = ovl_i_path_real(inode, &realpath);
real_mnt_userns = mnt_user_ns(realpath.mnt);
inode->i_uid = i_uid_into_mnt(real_mnt_userns, realinode);

View File

@@ -518,7 +518,7 @@ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
sig ^= PERSISTENT_RAM_SIG;
if (prz->buffer->sig == sig) {
if (buffer_size(prz) == 0) {
if (buffer_size(prz) == 0 && buffer_start(prz) == 0) {
pr_debug("found existing empty buffer\n");
return 0;
}

View File

@@ -662,6 +662,20 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
} while (0)
#endif
#ifndef pte_needs_flush
static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
return true;
}
#endif
#ifndef huge_pmd_needs_flush
static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
return true;
}
#endif
#endif /* CONFIG_MMU */
#endif /* _ASM_GENERIC__TLB_H */

View File

@@ -36,8 +36,9 @@ int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
pgprot_t newprot, unsigned long cp_flags);
int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
pmd_t *pmd, unsigned long addr, pgprot_t newprot,
unsigned long cp_flags);
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
pgprot_t pgprot, bool write);

View File

@@ -1958,10 +1958,11 @@ extern unsigned long move_page_tables(struct vm_area_struct *vma,
#define MM_CP_UFFD_WP_ALL (MM_CP_UFFD_WP | \
MM_CP_UFFD_WP_RESOLVE)
extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
extern unsigned long change_protection(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start,
unsigned long end, pgprot_t newprot,
unsigned long cp_flags);
extern int mprotect_fixup(struct vm_area_struct *vma,
extern int mprotect_fixup(struct mmu_gather *tlb, struct vm_area_struct *vma,
struct vm_area_struct **pprev, unsigned long start,
unsigned long end, unsigned long newflags);

View File

@@ -575,6 +575,26 @@ extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp);
#endif
#ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD
/*
* pmdp_invalidate_ad() invalidates the PMD while changing a transparent
* hugepage mapping in the page tables. This function is similar to
* pmdp_invalidate(), but should only be used if the access and dirty bits would
* not be cleared by the software in the new PMD value. The function ensures
* that hardware updates of the access and dirty bits are not lost.
*
* Doing so allows certain architectures to avoid a TLB flush in most
* cases. Another TLB flush might still be necessary later if the PMD
* update itself requires one (e.g., if protection was made stricter).
* Even then, the caller may be able to batch these TLB flushing
* operations, so fewer TLB flushes are needed overall.
*/
extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);
#endif
#ifndef __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{

View File

@@ -108,6 +108,19 @@ DECLARE_HOOK(android_vh_task_blocks_on_rtmutex,
DECLARE_HOOK(android_vh_rtmutex_waiter_prio,
TP_PROTO(struct task_struct *task, int *waiter_prio),
TP_ARGS(task, waiter_prio));
DECLARE_HOOK(android_vh_exit_signal_whether_wake,
TP_PROTO(struct task_struct *p, bool *wake),
TP_ARGS(p, wake));
DECLARE_HOOK(android_vh_exit_check,
TP_PROTO(struct task_struct *p),
TP_ARGS(p));
DECLARE_HOOK(android_vh_freeze_whether_wake,
TP_PROTO(struct task_struct *t, bool *wake),
TP_ARGS(t, wake));
#endif /* _TRACE_HOOK_DTASK_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View File

@@ -32,6 +32,7 @@ struct slabinfo;
struct track;
struct address_space;
struct page_vma_mapped_walk;
struct cma;
DECLARE_RESTRICTED_HOOK(android_rvh_set_skip_swapcache_flags,
TP_PROTO(gfp_t *flags),
@@ -244,6 +245,16 @@ DECLARE_HOOK(android_vh_look_around,
DECLARE_HOOK(android_vh_try_cma_fallback,
TP_PROTO(struct zone *zone, unsigned int order, bool *try_cma),
TP_ARGS(zone, order, try_cma));
DECLARE_HOOK(android_vh_set_page_migrating,
TP_PROTO(struct page *page),
TP_ARGS(page));
DECLARE_HOOK(android_vh_clear_page_migrating,
TP_PROTO(struct page *page),
TP_ARGS(page));
DECLARE_HOOK(android_vh_cma_alloc_bypass,
TP_PROTO(struct cma *cma, unsigned long count, unsigned int align,
bool no_warn, struct page **page, bool *bypass),
TP_ARGS(cma, count, align, no_warn, page, bypass));
#endif /* _TRACE_HOOK_MM_H */
/* This part must be outside protection */

View File

@@ -58,6 +58,10 @@ DECLARE_HOOK(android_vh_modify_thermal_cpu_get_power,
TP_PROTO(struct cpufreq_policy *policy, u32 *power),
TP_ARGS(policy, power));
DECLARE_HOOK(android_vh_enable_thermal_genl_check,
TP_PROTO(int event, int tz_id, int *enable_thermal_genl),
TP_ARGS(event, tz_id, enable_thermal_genl));
#endif /* _TRACE_HOOK_THERMAL_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
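
A vendor module attaches to this hook through the generated register_trace_android_vh_enable_thermal_genl_check(); a minimal sketch (the filter policy and zone id are made up):

#include <linux/module.h>
#include <trace/hooks/thermal.h>

/* Hypothetical policy: suppress thermal netlink events for zone 3. */
static void my_genl_filter(void *data, int event, int tz_id,
			   int *enable_thermal_genl)
{
	if (tz_id == 3)
		*enable_thermal_genl = 0;
}

static int __init my_filter_init(void)
{
	return register_trace_android_vh_enable_thermal_genl_check(
			my_genl_filter, NULL);
}
module_init(my_filter_init);
MODULE_LICENSE("GPL");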

View File

@@ -56,15 +56,29 @@ struct fuse_in_postfilter_header {
/** One input argument of a request */
struct fuse_bpf_in_arg {
uint32_t size;
const void *value;
const void *end_offset;
uint32_t padding;
union {
const void *value;
uint64_t padding2;
};
union {
const void *end_offset;
uint64_t padding3;
};
};
/** One output argument of a request */
struct fuse_bpf_arg {
uint32_t size;
void *value;
void *end_offset;
uint32_t padding;
union {
void *value;
uint64_t padding2;
};
union {
void *end_offset;
uint64_t padding3;
};
};
#define FUSE_MAX_IN_ARGS 5
@@ -80,6 +94,7 @@ struct fuse_bpf_args {
uint32_t in_numargs;
uint32_t out_numargs;
uint32_t flags;
uint32_t padding;
struct fuse_bpf_in_arg in_args[FUSE_MAX_IN_ARGS];
struct fuse_bpf_arg out_args[FUSE_MAX_OUT_ARGS];
};
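
The unions pad each pointer slot to 64 bits so this UAPI struct has a single layout for 32- and 64-bit userspace; a hedged compile-time check illustrating the intent (the checking helper is hypothetical, the asserted values follow from the field order above):

#include <linux/build_bug.h>
#include <linux/stddef.h>

static inline void fuse_bpf_arg_layout_check(void)
{
	/* 4 (size) + 4 (padding) + 8 (value) + 8 (end_offset),
	 * regardless of sizeof(void *). */
	BUILD_BUG_ON(sizeof(struct fuse_bpf_in_arg) != 24);
	BUILD_BUG_ON(offsetof(struct fuse_bpf_in_arg, end_offset) != 16);
	BUILD_BUG_ON(sizeof(struct fuse_bpf_arg) != 24);
}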

View File

@@ -7,6 +7,7 @@
#include "cgroup-internal.h"
#include <trace/events/cgroup.h>
#include <trace/hooks/dtask.h>
/*
* Propagate the cgroup frozen state upwards by the cgroup tree.
@@ -155,17 +156,21 @@ void cgroup_leave_frozen(bool always_leave)
static void cgroup_freeze_task(struct task_struct *task, bool freeze)
{
unsigned long flags;
bool wake = true;
/* If the task is about to die, don't bother with freezing it. */
if (!lock_task_sighand(task, &flags))
return;
trace_android_vh_freeze_whether_wake(task, &wake);
if (freeze) {
task->jobctl |= JOBCTL_TRAP_FREEZE;
signal_wake_up(task, false);
if (wake)
signal_wake_up(task, false);
} else {
task->jobctl &= ~JOBCTL_TRAP_FREEZE;
wake_up_process(task);
if (wake)
wake_up_process(task);
}
unlock_task_sighand(task, &flags);

View File

@@ -1223,6 +1223,11 @@ static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
return 0;
}
static int perf_mux_hrtimer_restart_ipi(void *arg)
{
return perf_mux_hrtimer_restart(arg);
}
void perf_pmu_disable(struct pmu *pmu)
{
int *count = this_cpu_ptr(pmu->pmu_disable_count);
@@ -11044,8 +11049,7 @@ perf_event_mux_interval_ms_store(struct device *dev,
cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
cpu_function_call(cpu,
(remote_function_f)perf_mux_hrtimer_restart, cpuctx);
cpu_function_call(cpu, perf_mux_hrtimer_restart_ipi, cpuctx);
}
cpus_read_unlock();
mutex_unlock(&mux_interval_mutex);

View File

@@ -70,6 +70,7 @@
#include <asm/unistd.h>
#include <asm/mmu_context.h>
#include <trace/hooks/mm.h>
#include <trace/hooks/dtask.h>
/*
* The default value should be high enough to not crash a system that randomly
@@ -830,6 +831,8 @@ void __noreturn do_exit(long code)
io_uring_files_cancel();
exit_signals(tsk); /* sets PF_EXITING */
trace_android_vh_exit_check(current);
/* sync mm's RSS info before statistics gathering */
if (tsk->mm)
sync_mm_rss(tsk->mm);

View File

@@ -1735,7 +1735,7 @@ static bool copy_data(struct prb_data_ring *data_ring,
if (!buf || !buf_size)
return true;
data_size = min_t(u16, buf_size, len);
data_size = min_t(unsigned int, buf_size, len);
memcpy(&buf[0], data, data_size); /* LMM(copy_data:A) */
return true;
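
The type change matters because min_t() casts both operands to the named type before comparing; a small illustration with assumed sizes:

#include <linux/minmax.h>
#include <linux/types.h>

/* Hypothetical sizes for a large ringbuffer read. */
static void demo_truncation(void)
{
	unsigned int buf_size = 65536 + 512;	/* caller's buffer */
	unsigned int len = 70000;		/* record length   */

	/* Old code: both operands wrap modulo 65536 first, so the
	 * "min" is min(512, 4464) == 512 -- far less than either
	 * actual value, silently truncating the copy. */
	unsigned int broken = min_t(u16, buf_size, len);

	/* Fixed code: no truncation, the min is the expected 66048. */
	unsigned int fixed = min_t(unsigned int, buf_size, len);

	(void)broken;
	(void)fixed;
}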

View File

@@ -4323,6 +4323,7 @@ int wake_up_state(struct task_struct *p, unsigned int state)
{
return try_to_wake_up(p, state, 0);
}
EXPORT_SYMBOL(wake_up_state);
/*
* Perform scheduler related setup for a newly forked process p.
@@ -5165,6 +5166,7 @@ unsigned int nr_running(void)
return sum;
}
EXPORT_SYMBOL(nr_running);
/*
* Check if only the current task is running on the CPU.

View File

@@ -59,6 +59,7 @@
#undef CREATE_TRACE_POINTS
#include <trace/hooks/signal.h>
#include <trace/hooks/dtask.h>
/*
* SLAB caches for signal bits.
*/
@@ -994,6 +995,7 @@ static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
struct signal_struct *signal = p->signal;
struct task_struct *t;
bool wake;
/*
* Now find a thread we can wake up to take the signal off the queue.
@@ -1053,7 +1055,10 @@ static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
trace_android_vh_exit_signal(t);
task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
sigaddset(&t->pending.signal, SIGKILL);
signal_wake_up(t, 1);
wake = true;
trace_android_vh_exit_signal_whether_wake(t, &wake);
if (wake)
signal_wake_up(t, 1);
} while_each_thread(p, t);
return;
}

View File

@@ -445,6 +445,12 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
int ret = -ENOMEM;
int num_attempts = 0;
int max_retries = 5;
bool bypass = false;
trace_android_vh_cma_alloc_bypass(cma, count, align, no_warn,
&page, &bypass);
if (bypass)
return page;
if (!cma || !cma->count || !cma->bitmap)
goto out;

View File

@@ -2611,6 +2611,7 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
int i, error = 0;
bool writably_mapped;
loff_t isize, end_offset;
loff_t last_pos = ra->prev_pos;
if (unlikely(iocb->ki_pos >= inode->i_sb->s_maxbytes))
return 0;
@@ -2659,7 +2660,7 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
* mark it as accessed the first time.
*/
if (iocb->ki_pos >> PAGE_SHIFT !=
ra->prev_pos >> PAGE_SHIFT)
last_pos >> PAGE_SHIFT)
mark_page_accessed(pvec.pages[0]);
for (i = 0; i < pagevec_count(&pvec); i++) {
@@ -2690,7 +2691,7 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
already_read += copied;
iocb->ki_pos += copied;
ra->prev_pos = iocb->ki_pos;
last_pos = iocb->ki_pos;
if (copied < bytes) {
error = -EFAULT;
@@ -2704,7 +2705,7 @@ put_pages:
} while (iov_iter_count(iter) && iocb->ki_pos < isize && !error);
file_accessed(filp);
ra->prev_pos = last_pos;
return already_read ? already_read : error;
}
EXPORT_SYMBOL_GPL(filemap_read);

View File

@@ -1720,18 +1720,21 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
* or if prot_numa but THP migration is not supported
* - HPAGE_PMD_NR if protections changed and TLB flush necessary
*/
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, pgprot_t newprot, unsigned long cp_flags)
int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
pmd_t *pmd, unsigned long addr, pgprot_t newprot,
unsigned long cp_flags)
{
struct mm_struct *mm = vma->vm_mm;
spinlock_t *ptl;
pmd_t entry;
pmd_t oldpmd, entry;
bool preserve_write;
int ret;
bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
if (prot_numa && !thp_migration_supported())
return 1;
@@ -1795,12 +1798,12 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
* The race makes MADV_DONTNEED miss the huge pmd and don't clear it
* which may break userspace.
*
* pmdp_invalidate() is required to make sure we don't miss
* pmdp_invalidate_ad() is required to make sure we don't miss
* dirty/young flags set by hardware.
*/
entry = pmdp_invalidate(vma, addr, pmd);
oldpmd = pmdp_invalidate_ad(vma, addr, pmd);
entry = pmd_modify(entry, newprot);
entry = pmd_modify(oldpmd, newprot);
if (preserve_write)
entry = pmd_mk_savedwrite(entry);
if (uffd_wp) {
@@ -1816,6 +1819,10 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
}
ret = HPAGE_PMD_NR;
set_pmd_at(mm, addr, pmd, entry);
if (huge_pmd_needs_flush(oldpmd, entry))
tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE);
BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry));
unlock:
spin_unlock(ptl);

View File

@@ -78,6 +78,7 @@ struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);
struct mem_cgroup *root_mem_cgroup __read_mostly;
EXPORT_SYMBOL_GPL(root_mem_cgroup);
/* Active memory cgroup to use from an interrupt context */
DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);

View File

@@ -104,6 +104,7 @@
#include <linux/swapops.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <linux/uaccess.h>
#include "internal.h"
@@ -634,12 +635,18 @@ unlock:
unsigned long change_prot_numa(struct vm_area_struct *vma,
unsigned long addr, unsigned long end)
{
struct mmu_gather tlb;
int nr_updated;
nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
tlb_gather_mmu(&tlb, vma->vm_mm);
nr_updated = change_protection(&tlb, vma, addr, end, PAGE_NONE,
MM_CP_PROT_NUMA);
if (nr_updated)
count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
tlb_finish_mmu(&tlb);
return nr_updated;
}
#else

View File

@@ -281,10 +281,14 @@ void remove_migration_ptes(struct page *old, struct page *new, bool locked)
.arg = old,
};
trace_android_vh_set_page_migrating(new);
if (locked)
rmap_walk_locked(new, &rwc);
else
rmap_walk(new, &rwc);
trace_android_vh_clear_page_migrating(new);
}
/*

View File

@@ -32,12 +32,13 @@
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include "internal.h"
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end, pgprot_t newprot,
unsigned long cp_flags)
static unsigned long change_pte_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
pte_t *pte, oldpte;
spinlock_t *ptl;
@@ -48,6 +49,8 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
tlb_change_page_size(tlb, PAGE_SIZE);
/*
* Can be called with only the mmap_lock for reading by
* prot_numa so we must check the pmd isn't constantly
@@ -138,6 +141,8 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
ptent = pte_mkwrite(ptent);
}
ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
if (pte_needs_flush(oldpte, ptent))
tlb_flush_pte_range(tlb, addr, PAGE_SIZE);
pages++;
} else if (is_swap_pte(oldpte)) {
swp_entry_t entry = pte_to_swp_entry(oldpte);
@@ -219,9 +224,9 @@ static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
return 0;
}
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
pud_t *pud, unsigned long addr, unsigned long end,
pgprot_t newprot, unsigned long cp_flags)
static inline unsigned long change_pmd_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, pud_t *pud, unsigned long addr,
unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
pmd_t *pmd;
unsigned long next;
@@ -261,8 +266,12 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
if (next - addr != HPAGE_PMD_SIZE) {
__split_huge_pmd(vma, pmd, addr, false, NULL);
} else {
int nr_ptes = change_huge_pmd(vma, pmd, addr,
newprot, cp_flags);
/*
* change_huge_pmd() does not defer TLB flushes,
* so no need to propagate the tlb argument.
*/
int nr_ptes = change_huge_pmd(tlb, vma, pmd,
addr, newprot, cp_flags);
if (nr_ptes) {
if (nr_ptes == HPAGE_PMD_NR) {
@@ -276,8 +285,8 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
}
/* fall through, the trans huge pmd just split */
}
this_pages = change_pte_range(vma, pmd, addr, next, newprot,
cp_flags);
this_pages = change_pte_range(tlb, vma, pmd, addr, next,
newprot, cp_flags);
pages += this_pages;
next:
cond_resched();
@@ -291,9 +300,9 @@ next:
return pages;
}
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
p4d_t *p4d, unsigned long addr, unsigned long end,
pgprot_t newprot, unsigned long cp_flags)
static inline unsigned long change_pud_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr,
unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
pud_t *pud;
unsigned long next;
@@ -304,16 +313,16 @@ static inline unsigned long change_pud_range(struct vm_area_struct *vma,
next = pud_addr_end(addr, end);
if (pud_none_or_clear_bad(pud))
continue;
pages += change_pmd_range(vma, pud, addr, next, newprot,
pages += change_pmd_range(tlb, vma, pud, addr, next, newprot,
cp_flags);
} while (pud++, addr = next, addr != end);
return pages;
}
static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
pgd_t *pgd, unsigned long addr, unsigned long end,
pgprot_t newprot, unsigned long cp_flags)
static inline unsigned long change_p4d_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr,
unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
p4d_t *p4d;
unsigned long next;
@@ -324,44 +333,40 @@ static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
next = p4d_addr_end(addr, end);
if (p4d_none_or_clear_bad(p4d))
continue;
pages += change_pud_range(vma, p4d, addr, next, newprot,
pages += change_pud_range(tlb, vma, p4d, addr, next, newprot,
cp_flags);
} while (p4d++, addr = next, addr != end);
return pages;
}
static unsigned long change_protection_range(struct vm_area_struct *vma,
unsigned long addr, unsigned long end, pgprot_t newprot,
unsigned long cp_flags)
static unsigned long change_protection_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long addr,
unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
struct mm_struct *mm = vma->vm_mm;
pgd_t *pgd;
unsigned long next;
unsigned long start = addr;
unsigned long pages = 0;
BUG_ON(addr >= end);
pgd = pgd_offset(mm, addr);
flush_cache_range(vma, addr, end);
inc_tlb_flush_pending(mm);
tlb_start_vma(tlb, vma);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
pages += change_p4d_range(vma, pgd, addr, next, newprot,
pages += change_p4d_range(tlb, vma, pgd, addr, next, newprot,
cp_flags);
} while (pgd++, addr = next, addr != end);
/* Only flush the TLB if we actually modified any entries: */
if (pages)
flush_tlb_range(vma, start, end);
dec_tlb_flush_pending(mm);
tlb_end_vma(tlb, vma);
return pages;
}
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
unsigned long change_protection(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start,
unsigned long end, pgprot_t newprot,
unsigned long cp_flags)
{
@@ -372,7 +377,7 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
if (is_vm_hugetlb_page(vma))
pages = hugetlb_change_protection(vma, start, end, newprot);
else
pages = change_protection_range(vma, start, end, newprot,
pages = change_protection_range(tlb, vma, start, end, newprot,
cp_flags);
return pages;
@@ -406,8 +411,9 @@ static const struct mm_walk_ops prot_none_walk_ops = {
};
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
unsigned long start, unsigned long end, unsigned long newflags)
mprotect_fixup(struct mmu_gather *tlb, struct vm_area_struct *vma,
struct vm_area_struct **pprev, unsigned long start,
unsigned long end, unsigned long newflags)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long oldflags = vma->vm_flags;
@@ -494,7 +500,7 @@ success:
dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
vma_set_page_prot(vma);
change_protection(vma, start, end, vma->vm_page_prot,
change_protection(tlb, vma, start, end, vma->vm_page_prot,
dirty_accountable ? MM_CP_DIRTY_ACCT : 0);
/*
@@ -528,6 +534,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
(prot & PROT_READ);
struct mmu_gather tlb;
start = untagged_addr(start);
@@ -584,6 +591,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
if (start > vma->vm_start)
prev = vma;
tlb_gather_mmu(&tlb, current->mm);
for (nstart = start ; ; ) {
unsigned long mask_off_old_flags;
unsigned long newflags;
@@ -610,18 +618,18 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
/* newflags >> 4 shift VM_MAY% in place of VM_% */
if ((newflags & ~(newflags >> 4)) & VM_ACCESS_FLAGS) {
error = -EACCES;
goto out;
break;
}
/* Allow architectures to sanity-check the new flags */
if (!arch_validate_flags(newflags)) {
error = -EINVAL;
goto out;
break;
}
error = security_file_mprotect(vma, reqprot, prot);
if (error)
goto out;
break;
tmp = vma->vm_end;
if (tmp > end)
@@ -630,27 +638,28 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
if (vma->vm_ops && vma->vm_ops->mprotect) {
error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags);
if (error)
goto out;
break;
}
error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
error = mprotect_fixup(&tlb, vma, &prev, nstart, tmp, newflags);
if (error)
goto out;
break;
nstart = tmp;
if (nstart < prev->vm_end)
nstart = prev->vm_end;
if (nstart >= end)
goto out;
break;
vma = prev->vm_next;
if (!vma || vma->vm_start != nstart) {
error = -ENOMEM;
goto out;
break;
}
prot = reqprot;
}
tlb_finish_mmu(&tlb);
out:
mmap_write_unlock(current->mm);
return error;
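
Taken together, the mprotect hunks above thread one caller-owned struct mmu_gather through the whole walk, replacing the old per-range inc_tlb_flush_pending()/flush_tlb_range()/dec_tlb_flush_pending() sequence with tlb_start_vma()/tlb_end_vma() and a single batched flush. That is also why the loop's early exits changed from goto out to break: every path must now reach tlb_finish_mmu() before unlocking. A minimal sketch of the resulting caller contract (illustrative, not the exact kernel code):

	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, current->mm);
	for (nstart = start; ; ) {
		/* ... pick newflags for the current vma ... */
		error = mprotect_fixup(&tlb, vma, &prev, nstart, tmp, newflags);
		if (error)
			break;	/* was "goto out": must not skip the flush */
		/* ... advance to the next vma ... */
	}
	tlb_finish_mmu(&tlb);	/* one deferred TLB flush for the whole span */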

View File

@@ -33,6 +33,7 @@ struct page_owner {
static bool page_owner_enabled = false;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);
EXPORT_SYMBOL_GPL(page_owner_inited);
static depot_stack_handle_t dummy_handle;
static depot_stack_handle_t failure_handle;
@@ -205,6 +206,7 @@ noinline void __set_page_owner(struct page *page, unsigned int order,
__set_page_owner_handle(page_ext, handle, order, gfp_mask);
page_ext_put(page_ext);
}
EXPORT_SYMBOL_GPL(__set_page_owner);
void __set_page_owner_migrate_reason(struct page *page, int reason)
{
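
Exporting __set_page_owner (alongside the already-exported page_owner_inited key above) lets GKI vendor modules attribute pages they hand out through private allocators. A hedged sketch of such a caller; in-tree this normally runs from the allocator's post_alloc_hook():

	struct page *page = alloc_pages(GFP_KERNEL, order);

	/* No-op unless page_owner= was enabled on the command line. */
	if (page && static_branch_unlikely(&page_owner_inited))
		__set_page_owner(page, order, GFP_KERNEL);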

View File

@@ -200,6 +200,14 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
}
#endif
#ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD
pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
return pmdp_invalidate(vma, address, pmdp);
}
#endif
#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
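
pmdp_invalidate_ad() is a new arch hook: invalidate a PMD without losing Access/Dirty bits the hardware may set concurrently. The generic fallback above simply forwards to pmdp_invalidate(). An architecture that can swap the entry atomically would define __HAVE_ARCH_PMDP_INVALIDATE_AD and supply something along these lines (a hedged sketch built on the pmdp_establish() idiom, not any particular arch's final code):

	#define __HAVE_ARCH_PMDP_INVALIDATE_AD
	pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
				 pmd_t *pmdp)
	{
		/* Atomic exchange: A/D updates racing with us are not dropped. */
		return pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
	}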

View File

@@ -930,6 +930,7 @@ int page_referenced(struct page *page,
return rwc.contended ? -1 : pra.referenced;
}
EXPORT_SYMBOL_GPL(page_referenced);
static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
unsigned long address, void *arg)
@@ -2066,10 +2067,14 @@ void try_to_migrate(struct page *page, enum ttu_flags flags)
if (!PageKsm(page) && PageAnon(page))
rwc.invalid_vma = invalid_migration_vma;
trace_android_vh_set_page_migrating(page);
if (flags & TTU_RMAP_LOCKED)
rmap_walk_locked(page, &rwc);
else
rmap_walk(page, &rwc);
trace_android_vh_clear_page_migrating(page);
}
/*
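
The new trace_android_vh_{set,clear}_page_migrating() calls bracket the rmap walk so a vendor module can track pages while they are being unmapped for migration. A sketch of attaching to the hook, assuming the usual register_trace_android_vh_* helper generated for this tracepoint (the header path is an assumption):

	#include <trace/hooks/mm.h>	/* assumed declaration site */

	static void my_set_page_migrating(void *data, struct page *page)
	{
		/* vendor-private bookkeeping for the migrating page */
	}

	static int __init my_hook_init(void)
	{
		return register_trace_android_vh_set_page_migrating(
				my_set_page_migrating, NULL);
	}
	module_init(my_hook_init);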

View File

@@ -16,6 +16,7 @@
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include "internal.h"
static __always_inline
@@ -686,6 +687,7 @@ int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
atomic_t *mmap_changing)
{
struct vm_area_struct *dst_vma;
struct mmu_gather tlb;
pgprot_t newprot;
int err;
@@ -727,8 +729,10 @@ int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
else
newprot = vm_get_page_prot(dst_vma->vm_flags);
change_protection(dst_vma, start, start + len, newprot,
tlb_gather_mmu(&tlb, dst_mm);
change_protection(&tlb, dst_vma, start, start + len, newprot,
enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);
tlb_finish_mmu(&tlb);
err = 0;
out_unlock:
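
mwriteprotect_range() is the kernel side of the UFFDIO_WRITEPROTECT ioctl, so with change_protection() now taking an mmu_gather it builds one around the call, exactly as the mprotect path does. For orientation, the userspace request that lands here (minimal sketch, error handling elided):

	#include <linux/userfaultfd.h>
	#include <sys/ioctl.h>

	struct uffdio_writeprotect wp = {
		.range = { .start = (unsigned long)addr, .len = len },
		.mode  = UFFDIO_WRITEPROTECT_MODE_WP,	/* 0 here clears WP */
	};
	ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);	/* ends up in mwriteprotect_range() */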

View File

@@ -45,6 +45,7 @@ static const struct proto_ops l2cap_sock_ops;
static void l2cap_sock_init(struct sock *sk, struct sock *parent);
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
int proto, gfp_t prio, int kern);
static void l2cap_sock_cleanup_listen(struct sock *parent);
bool l2cap_is_socket(struct socket *sock)
{
@@ -1414,6 +1415,7 @@ static int l2cap_sock_release(struct socket *sock)
if (!sk)
return 0;
l2cap_sock_cleanup_listen(sk);
bt_sock_unlink(&l2cap_sk_list, sk);
err = l2cap_sock_shutdown(sock, SHUT_RDWR);

View File

@@ -3479,8 +3479,6 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]);
return PTR_ERR(chain);
}
if (nft_chain_is_bound(chain))
return -EOPNOTSUPP;
} else if (nla[NFTA_RULE_CHAIN_ID]) {
chain = nft_chain_lookup_byid(net, table, nla[NFTA_RULE_CHAIN_ID]);
@@ -3492,6 +3490,9 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
return -EINVAL;
}
if (nft_chain_is_bound(chain))
return -EOPNOTSUPP;
if (nla[NFTA_RULE_HANDLE]) {
handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_HANDLE]));
rule = __nft_rule_lookup(chain, handle);
@@ -6585,6 +6586,7 @@ static int nft_set_catchall_flush(const struct nft_ctx *ctx,
ret = __nft_set_catchall_flush(ctx, set, &elem);
if (ret < 0)
break;
nft_set_elem_change_active(ctx->net, set, ext);
}
return ret;
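
The first two nf_tables hunks move the nft_chain_is_bound() rejection below both lookup branches, so a bound chain reached via NFTA_RULE_CHAIN_ID is refused the same way as one reached by name; before, the by-id path skipped the check. The shape of the fix, abstracted (hedged, not the full function):

	if (nla[NFTA_RULE_CHAIN])
		chain = nft_chain_lookup(net, table, nla[NFTA_RULE_CHAIN], genmask);
	else if (nla[NFTA_RULE_CHAIN_ID])
		chain = nft_chain_lookup_byid(net, table, nla[NFTA_RULE_CHAIN_ID]);

	if (nft_chain_is_bound(chain))	/* now guards both paths */
		return -EOPNOTSUPP;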

View File

@@ -587,9 +587,9 @@ __build_packet_message(struct nfnl_log_net *log,
goto nla_put_failure;
}
if (hooknum <= NF_INET_FORWARD && skb->tstamp) {
if (hooknum <= NF_INET_FORWARD) {
struct nfulnl_msg_packet_timestamp ts;
struct timespec64 kts = ktime_to_timespec64(skb->tstamp);
struct timespec64 kts = ktime_to_timespec64(skb->tstamp ?: ktime_get_real());
ts.sec = cpu_to_be64(kts.tv_sec);
ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);
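
Dropping skb->tstamp from the condition means a timestamp attribute is now always emitted, with the current wall-clock time as a fallback when the skb carries none. The GNU a ?: b extension used here is a ? a : b with a evaluated only once; open-coded it reads:

	ktime_t t = skb->tstamp;
	struct timespec64 kts = ktime_to_timespec64(t ? t : ktime_get_real());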

View File

@@ -30,11 +30,11 @@ void nft_byteorder_eval(const struct nft_expr *expr,
const struct nft_byteorder *priv = nft_expr_priv(expr);
u32 *src = &regs->data[priv->sreg];
u32 *dst = &regs->data[priv->dreg];
union { u32 u32; u16 u16; } *s, *d;
u16 *s16, *d16;
unsigned int i;
s = (void *)src;
d = (void *)dst;
s16 = (void *)src;
d16 = (void *)dst;
switch (priv->size) {
case 8: {
@@ -61,11 +61,11 @@ void nft_byteorder_eval(const struct nft_expr *expr,
switch (priv->op) {
case NFT_BYTEORDER_NTOH:
for (i = 0; i < priv->len / 4; i++)
d[i].u32 = ntohl((__force __be32)s[i].u32);
dst[i] = ntohl((__force __be32)src[i]);
break;
case NFT_BYTEORDER_HTON:
for (i = 0; i < priv->len / 4; i++)
d[i].u32 = (__force __u32)htonl(s[i].u32);
dst[i] = (__force __u32)htonl(src[i]);
break;
}
break;
@@ -73,11 +73,11 @@ void nft_byteorder_eval(const struct nft_expr *expr,
switch (priv->op) {
case NFT_BYTEORDER_NTOH:
for (i = 0; i < priv->len / 2; i++)
d[i].u16 = ntohs((__force __be16)s[i].u16);
d16[i] = ntohs((__force __be16)s16[i]);
break;
case NFT_BYTEORDER_HTON:
for (i = 0; i < priv->len / 2; i++)
d[i].u16 = (__force __u16)htons(s[i].u16);
d16[i] = (__force __u16)htons(s16[i]);
break;
}
break;
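
The nft_byteorder bug was a stride error: the anonymous union has size 4, so indexing it advances four bytes even when only .u16 is read, touching every other halfword; plain u16 pointers stride two bytes, and the 32-bit case needs no indirection at all. An illustration of the difference:

	union { u32 u32; u16 u16; } *s = (void *)src;	/* old: element size 4 */
	u16 *s16 = (void *)src;				/* new: element size 2 */

	u16 wrong = s[1].u16;	/* reads bytes 4..5: skips the second halfword */
	u16 right = s16[1];	/* reads bytes 2..3: the intended second halfword */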

View File

@@ -202,7 +202,6 @@ void nfc_llcp_sock_link(struct llcp_sock_list *l, struct sock *s);
void nfc_llcp_sock_unlink(struct llcp_sock_list *l, struct sock *s);
void nfc_llcp_socket_remote_param_init(struct nfc_llcp_sock *sock);
struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev);
struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local);
int nfc_llcp_local_put(struct nfc_llcp_local *local);
u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
struct nfc_llcp_sock *sock);

View File

@@ -361,6 +361,7 @@ int nfc_llcp_send_symm(struct nfc_dev *dev)
struct sk_buff *skb;
struct nfc_llcp_local *local;
u16 size = 0;
int err;
pr_debug("Sending SYMM\n");
@@ -372,8 +373,10 @@ int nfc_llcp_send_symm(struct nfc_dev *dev)
size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE;
skb = alloc_skb(size, GFP_KERNEL);
if (skb == NULL)
return -ENOMEM;
if (skb == NULL) {
err = -ENOMEM;
goto out;
}
skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
@@ -383,8 +386,11 @@ int nfc_llcp_send_symm(struct nfc_dev *dev)
nfc_llcp_send_to_raw_sock(local, skb, NFC_DIRECTION_TX);
return nfc_data_exchange(dev, local->target_idx, skb,
err = nfc_data_exchange(dev, local->target_idx, skb,
nfc_llcp_recv, local);
out:
nfc_llcp_local_put(local);
return err;
}
int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)

View File

@@ -17,6 +17,8 @@
static u8 llcp_magic[3] = {0x46, 0x66, 0x6d};
static LIST_HEAD(llcp_devices);
/* Protects llcp_devices list */
static DEFINE_SPINLOCK(llcp_devices_lock);
static void nfc_llcp_rx_skb(struct nfc_llcp_local *local, struct sk_buff *skb);
@@ -143,7 +145,7 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool device,
write_unlock(&local->raw_sockets.lock);
}
struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local)
static struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local)
{
kref_get(&local->ref);
@@ -171,7 +173,6 @@ static void local_release(struct kref *ref)
local = container_of(ref, struct nfc_llcp_local, ref);
list_del(&local->list);
local_cleanup(local);
kfree(local);
}
@@ -284,12 +285,33 @@ static void nfc_llcp_sdreq_timer(struct timer_list *t)
struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev)
{
struct nfc_llcp_local *local;
struct nfc_llcp_local *res = NULL;
spin_lock(&llcp_devices_lock);
list_for_each_entry(local, &llcp_devices, list)
if (local->dev == dev)
return local;
if (local->dev == dev) {
res = nfc_llcp_local_get(local);
break;
}
spin_unlock(&llcp_devices_lock);
pr_debug("No device found\n");
return res;
}
static struct nfc_llcp_local *nfc_llcp_remove_local(struct nfc_dev *dev)
{
struct nfc_llcp_local *local, *tmp;
spin_lock(&llcp_devices_lock);
list_for_each_entry_safe(local, tmp, &llcp_devices, list)
if (local->dev == dev) {
list_del(&local->list);
spin_unlock(&llcp_devices_lock);
return local;
}
spin_unlock(&llcp_devices_lock);
pr_warn("Shutting down device not found\n");
return NULL;
}
@@ -610,12 +632,15 @@ u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len)
*general_bytes_len = local->gb_len;
nfc_llcp_local_put(local);
return local->gb;
}
int nfc_llcp_set_remote_gb(struct nfc_dev *dev, const u8 *gb, u8 gb_len)
{
struct nfc_llcp_local *local;
int err;
if (gb_len < 3 || gb_len > NFC_MAX_GT_LEN)
return -EINVAL;
@@ -632,12 +657,16 @@ int nfc_llcp_set_remote_gb(struct nfc_dev *dev, const u8 *gb, u8 gb_len)
if (memcmp(local->remote_gb, llcp_magic, 3)) {
pr_err("MAC does not support LLCP\n");
return -EINVAL;
err = -EINVAL;
goto out;
}
return nfc_llcp_parse_gb_tlv(local,
err = nfc_llcp_parse_gb_tlv(local,
&local->remote_gb[3],
local->remote_gb_len - 3);
out:
nfc_llcp_local_put(local);
return err;
}
static u8 nfc_llcp_dsap(const struct sk_buff *pdu)
@@ -1527,6 +1556,8 @@ int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb)
__nfc_llcp_recv(local, skb);
nfc_llcp_local_put(local);
return 0;
}
@@ -1543,6 +1574,8 @@ void nfc_llcp_mac_is_down(struct nfc_dev *dev)
/* Close and purge all existing sockets */
nfc_llcp_socket_release(local, true, 0);
nfc_llcp_local_put(local);
}
void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
@@ -1568,6 +1601,8 @@ void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
mod_timer(&local->link_timer,
jiffies + msecs_to_jiffies(local->remote_lto));
}
nfc_llcp_local_put(local);
}
int nfc_llcp_register_device(struct nfc_dev *ndev)
@@ -1618,7 +1653,7 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
void nfc_llcp_unregister_device(struct nfc_dev *dev)
{
struct nfc_llcp_local *local = nfc_llcp_find_local(dev);
struct nfc_llcp_local *local = nfc_llcp_remove_local(dev);
if (local == NULL) {
pr_debug("No such device\n");
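
This is the heart of the series: nfc_llcp_find_local() now takes its reference while llcp_devices_lock still pins the entry and returns it to a caller that must drop it with nfc_llcp_local_put(); nfc_llcp_remove_local() is the unlink half used at unregister time. The pattern in the abstract (a hedged sketch with hypothetical names, not the kernel code):

	struct obj *obj_find(struct list_head *head, spinlock_t *lock, int key)
	{
		struct obj *o, *res = NULL;

		spin_lock(lock);
		list_for_each_entry(o, head, list) {
			if (o->key == key) {
				kref_get(&o->ref);	/* ref taken while the lock pins o */
				res = o;
				break;
			}
		}
		spin_unlock(lock);
		return res;	/* caller owns a reference and must put it */
	}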

View File

@@ -99,7 +99,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
}
llcp_sock->dev = dev;
llcp_sock->local = nfc_llcp_local_get(local);
llcp_sock->local = local;
llcp_sock->nfc_protocol = llcp_addr.nfc_protocol;
llcp_sock->service_name_len = min_t(unsigned int,
llcp_addr.service_name_len,
@@ -181,7 +181,7 @@ static int llcp_raw_sock_bind(struct socket *sock, struct sockaddr *addr,
}
llcp_sock->dev = dev;
llcp_sock->local = nfc_llcp_local_get(local);
llcp_sock->local = local;
llcp_sock->nfc_protocol = llcp_addr.nfc_protocol;
nfc_llcp_sock_link(&local->raw_sockets, sk);
@@ -698,24 +698,24 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
if (dev->dep_link_up == false) {
ret = -ENOLINK;
device_unlock(&dev->dev);
goto put_dev;
goto sock_llcp_put_local;
}
device_unlock(&dev->dev);
if (local->rf_mode == NFC_RF_INITIATOR &&
addr->target_idx != local->target_idx) {
ret = -ENOLINK;
goto put_dev;
goto sock_llcp_put_local;
}
llcp_sock->dev = dev;
llcp_sock->local = nfc_llcp_local_get(local);
llcp_sock->local = local;
llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
if (llcp_sock->ssap == LLCP_SAP_MAX) {
nfc_llcp_local_put(llcp_sock->local);
llcp_sock->local = NULL;
ret = -ENOMEM;
goto put_dev;
goto sock_llcp_nullify;
}
llcp_sock->reserved_ssap = llcp_sock->ssap;
@@ -760,9 +760,13 @@ sock_unlink:
sock_llcp_release:
nfc_llcp_put_ssap(local, llcp_sock->ssap);
nfc_llcp_local_put(llcp_sock->local);
sock_llcp_nullify:
llcp_sock->local = NULL;
sock_llcp_put_local:
nfc_llcp_local_put(local);
put_dev:
nfc_put_device(dev);
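
With find_local handing over a reference, the sockets stop taking a second one at assignment (llcp_sock->local = local), and connect()'s unwind ladder gains labels so each exit releases exactly what was acquired since the previous label. Condensed from the hunks above (illustrative):

	sock_llcp_release:
		nfc_llcp_put_ssap(local, llcp_sock->ssap);
	sock_llcp_nullify:
		llcp_sock->local = NULL;
	sock_llcp_put_local:
		nfc_llcp_local_put(local);	/* drop find_local()'s reference */
	put_dev:
		nfc_put_device(dev);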

View File

@@ -1039,11 +1039,14 @@ static int nfc_genl_llc_get_params(struct sk_buff *skb, struct genl_info *info)
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
rc = -ENOMEM;
goto exit;
goto put_local;
}
rc = nfc_genl_send_params(msg, local, info->snd_portid, info->snd_seq);
put_local:
nfc_llcp_local_put(local);
exit:
device_unlock(&dev->dev);
@@ -1105,7 +1108,7 @@ static int nfc_genl_llc_set_params(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NFC_ATTR_LLC_PARAM_LTO]) {
if (dev->dep_link_up) {
rc = -EINPROGRESS;
goto exit;
goto put_local;
}
local->lto = nla_get_u8(info->attrs[NFC_ATTR_LLC_PARAM_LTO]);
@@ -1117,6 +1120,9 @@ static int nfc_genl_llc_set_params(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NFC_ATTR_LLC_PARAM_MIUX])
local->miux = cpu_to_be16(miux);
put_local:
nfc_llcp_local_put(local);
exit:
device_unlock(&dev->dev);
@@ -1172,7 +1178,7 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info)
if (rc != 0) {
rc = -EINVAL;
goto exit;
goto put_local;
}
if (!sdp_attrs[NFC_SDP_ATTR_URI])
@@ -1191,7 +1197,7 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info)
sdreq = nfc_llcp_build_sdreq_tlv(tid, uri, uri_len);
if (sdreq == NULL) {
rc = -ENOMEM;
goto exit;
goto put_local;
}
tlvs_len += sdreq->tlv_len;
@@ -1201,10 +1207,14 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info)
if (hlist_empty(&sdreq_list)) {
rc = -EINVAL;
goto exit;
goto put_local;
}
rc = nfc_llcp_send_snl_sdreq(local, &sdreq_list, tlvs_len);
put_local:
nfc_llcp_local_put(local);
exit:
device_unlock(&dev->dev);

View File

@@ -52,6 +52,7 @@ int nfc_llcp_set_remote_gb(struct nfc_dev *dev, const u8 *gb, u8 gb_len);
u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len);
int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb);
struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev);
int nfc_llcp_local_put(struct nfc_llcp_local *local);
int __init nfc_llcp_init(void);
void nfc_llcp_exit(void);
void nfc_llcp_free_sdp_tlv(struct nfc_llcp_sdp_tlv *sdp);

View File

@@ -265,7 +265,6 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
return -ENOBUFS;
fnew->id = f->id;
fnew->res = f->res;
fnew->ifindex = f->ifindex;
fnew->tp = f->tp;

View File

@@ -511,7 +511,6 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
if (fold) {
f->id = fold->id;
f->iif = fold->iif;
f->res = fold->res;
f->handle = fold->handle;
f->tp = fold->tp;

View File

@@ -812,7 +812,6 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
new->ifindex = n->ifindex;
new->fshift = n->fshift;
new->res = n->res;
new->flags = n->flags;
RCU_INIT_POINTER(new->ht_down, ht);
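
The three classifier hunks (cls_fw, cls_route, cls_u32) all delete the same thing: copying the old filter's tcf_result into its replacement. That copy duplicated a class pointer without taking a reference, so the later tcf_unbind_filter() on the old filter underflowed the class refcount and left a use-after-free. The invariant being restored, sketched from cls_fw's bind path (hedged):

	if (tb[TCA_FW_CLASSID]) {
		fnew->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]);
		tcf_bind_filter(tp, &fnew->res, base);	/* takes a class reference */
	}
	/* ... and on teardown ... */
	tcf_unbind_filter(tp, &f->res);			/* drops that reference */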

View File

@@ -2156,6 +2156,7 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
if (false) {
alloc_skb:
spin_unlock(&other->sk_receive_queue.lock);
unix_state_unlock(other);
mutex_unlock(&unix_sk(other)->iolock);
newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
@@ -2195,6 +2196,7 @@ alloc_skb:
init_scm = false;
}
spin_lock(&other->sk_receive_queue.lock);
skb = skb_peek_tail(&other->sk_receive_queue);
if (tail && tail == skb) {
skb = newskb;
@@ -2225,14 +2227,11 @@ alloc_skb:
refcount_add(size, &sk->sk_wmem_alloc);
if (newskb) {
err = unix_scm_to_skb(&scm, skb, false);
if (err)
goto err_state_unlock;
spin_lock(&other->sk_receive_queue.lock);
unix_scm_to_skb(&scm, skb, false);
__skb_queue_tail(&other->sk_receive_queue, newskb);
spin_unlock(&other->sk_receive_queue.lock);
}
spin_unlock(&other->sk_receive_queue.lock);
unix_state_unlock(other);
mutex_unlock(&unix_sk(other)->iolock);
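
The af_unix hunks widen the receive-queue lock's scope in the sendpage path: the lock is now taken before skb_peek_tail() and held until the new skb is queued (the alloc_skb retry path and the final unlock gain matching drops), so a concurrent reader can no longer free the peeked tail in the window; the scm attachment now happens under that lock, its old error exit gone. The locking shape, condensed (illustrative):

	spin_lock(&other->sk_receive_queue.lock);
	skb = skb_peek_tail(&other->sk_receive_queue);	/* valid only under the lock */
	if (tail && tail == skb)
		skb = newskb;
	/* ... merge the page into skb, or ... */
	if (newskb)
		__skb_queue_tail(&other->sk_receive_queue, newskb);
	spin_unlock(&other->sk_receive_queue.lock);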

View File

@@ -2047,6 +2047,38 @@ out:
return result;
}
static int bpf_test_mkdir_and_remove_bpf(const char *mount_dir)
{
const char *dir = "dir";
int result = TEST_FAILURE;
int src_fd = -1;
int bpf_fd = -1;
int fuse_dev = -1;
int fd = -1;
int fd2 = -1;
TEST(src_fd = open(ft_src, O_DIRECTORY | O_RDONLY | O_CLOEXEC),
src_fd != -1);
TESTEQUAL(install_elf_bpf("test_bpf.bpf", "test_mkdir_remove", &bpf_fd,
NULL, NULL), 0);
TESTEQUAL(mount_fuse_no_init(mount_dir, bpf_fd, src_fd, &fuse_dev), 0);
TEST(fd = s_mkdir(s_path(s(mount_dir), s(dir)), 0777),
fd != -1);
TEST(fd2 = s_open(s_path(s(mount_dir), s(dir)), O_RDONLY),
fd2 != -1);
result = TEST_SUCCESS;
out:
close(fd2);
close(fd);
close(fuse_dev);
close(bpf_fd);
close(src_fd);
umount(mount_dir);
return result;
}
static void parse_range(const char *ranges, bool *run_test, size_t tests)
{
size_t i;
@@ -2175,7 +2207,7 @@ int main(int argc, char *argv[])
MAKE_TEST(bpf_test_lookup_postfilter),
MAKE_TEST(flock_test),
MAKE_TEST(bpf_test_create_and_remove_bpf),
MAKE_TEST(bpf_test_mkdir_and_remove_bpf),
};
#undef MAKE_TEST

View File

@@ -530,4 +530,26 @@ int createremovebpf_test(struct fuse_bpf_args *fa)
}
}
SEC("test_mkdir_remove")
int mkdirremovebpf_test(struct fuse_bpf_args *fa)
{
switch (fa->opcode) {
case FUSE_LOOKUP | FUSE_PREFILTER: {
return FUSE_BPF_BACKING | FUSE_BPF_POST_FILTER;
}
case FUSE_LOOKUP | FUSE_POSTFILTER: {
struct fuse_entry_bpf_out *febo = fa->out_args[1].value;
febo->bpf_action = FUSE_ACTION_REMOVE;
return 0;
}
case FUSE_OPENDIR | FUSE_PREFILTER: {
return -EIO;
}
default:
return FUSE_BPF_BACKING;
}
}
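
Read together with bpf_test_mkdir_and_remove_bpf above, this handler exercises bpf detachment rather than filtering: the lookup postfilter strips the program from the fresh directory, so the -EIO opendir prefilter should never be consulted and the test's s_open() is expected to succeed. In sequence:

	/*
	 * mkdir "dir" -> FUSE_LOOKUP|PREFILTER  -> FUSE_BPF_BACKING | POST_FILTER
	 *             -> FUSE_LOOKUP|POSTFILTER -> FUSE_ACTION_REMOVE (detach bpf)
	 * open "dir"  -> no bpf left on the inode, so the -EIO
	 *                FUSE_OPENDIR|PREFILTER handler never runs.
	 */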