Snap for 10510783 from a710fa57c0 to android12-5.10-keystone-qcom-release

Change-Id: I332f026a3e4fa7e3a37d419b8af4443529e944dd
Android Build Coastguard Worker
2023-07-18 10:00:52 +00:00
33 changed files with 829 additions and 549 deletions

File diff suppressed because it is too large

View File

@@ -172,6 +172,7 @@
 __const_udelay
 consume_skb
 contig_page_data
+copy_highpage
 __cpu_active_mask
 cpu_all_bits
 cpu_bit_bitmap
@@ -2114,7 +2115,9 @@
 ttm_bo_move_ttm
 ttm_bo_put
 ttm_bo_unlock_delayed_workqueue
+ttm_bo_unmap_virtual
 ttm_bo_validate
+ttm_bo_wait
 ttm_dma_page_alloc_debugfs
 ttm_dma_populate
 ttm_dma_tt_fini

View File

@@ -143,6 +143,7 @@
 cdev_device_del
 cdev_init
 __cfi_slowpath
+cgroup_add_dfl_cftypes
 cgroup_add_legacy_cftypes
 cgroup_path_ns
 cgroup_taskset_first
@@ -2793,6 +2794,15 @@
 __traceiter_android_vh_clear_mask_adjust
 __traceiter_android_vh_clear_reserved_fmt_fields
 __traceiter_android_vh_cma_drain_all_pages_bypass
+__traceiter_android_vh_alloc_pages_reclaim_bypass
+__traceiter_android_vh_free_unref_page_bypass
+__traceiter_android_vh_kvmalloc_node_use_vmalloc
+__traceiter_android_vh_should_alloc_pages_retry
+__traceiter_android_vh_unreserve_highatomic_bypass
+__traceiter_android_vh_pageset_update
+__traceiter_android_vh_rmqueue_bulk_bypass
+__traceiter_android_vh_tune_mmap_readaround
+__traceiter_android_vh_ra_tuning_max_page
 __traceiter_android_vh_cleanup_old_buffers_bypass
 __traceiter_android_vh_commit_creds
 __traceiter_android_vh_cpufreq_acct_update_power
@@ -3049,6 +3059,15 @@
 __tracepoint_android_vh_clear_mask_adjust
 __tracepoint_android_vh_clear_reserved_fmt_fields
 __tracepoint_android_vh_cma_drain_all_pages_bypass
+__tracepoint_android_vh_alloc_pages_reclaim_bypass
+__tracepoint_android_vh_free_unref_page_bypass
+__tracepoint_android_vh_kvmalloc_node_use_vmalloc
+__tracepoint_android_vh_should_alloc_pages_retry
+__tracepoint_android_vh_unreserve_highatomic_bypass
+__tracepoint_android_vh_pageset_update
+__tracepoint_android_vh_rmqueue_bulk_bypass
+__tracepoint_android_vh_tune_mmap_readaround
+__tracepoint_android_vh_ra_tuning_max_page
 __tracepoint_android_vh_cleanup_old_buffers_bypass
 __tracepoint_android_vh_commit_creds
 __tracepoint_android_vh_cpufreq_acct_update_power

View File

@@ -172,10 +172,10 @@ void __meminit init_trampoline_kaslr(void)
 		set_p4d(p4d_tramp,
 			__p4d(_KERNPG_TABLE | __pa(pud_page_tramp)));

-		set_pgd(&trampoline_pgd_entry,
-			__pgd(_KERNPG_TABLE | __pa(p4d_page_tramp)));
+		trampoline_pgd_entry =
+			__pgd(_KERNPG_TABLE | __pa(p4d_page_tramp));
 	} else {
-		set_pgd(&trampoline_pgd_entry,
-			__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
+		trampoline_pgd_entry =
+			__pgd(_KERNPG_TABLE | __pa(pud_page_tramp));
 	}
 }

View File

@@ -332,6 +332,12 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_page_trylock);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_referenced_check_bypass);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_drain_all_pages_bypass);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cma_drain_all_pages_bypass);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_unref_page_bypass);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_kvmalloc_node_use_vmalloc);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_should_alloc_pages_retry);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_unreserve_highatomic_bypass);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_pageset_update);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rmqueue_bulk_bypass);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_pcplist_add_cma_pages_bypass);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_psi_event);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_psi_group);
@@ -441,6 +447,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_read_done);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_handle_tlb_conf);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shrink_node_memcgs);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ra_tuning_max_page);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_mmap_readaround);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_handle_pte_fault_end);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_handle_pte_fault_end);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cow_user_page);

View File

@@ -32,6 +32,7 @@
 #include <linux/hiddev.h>
 #include <linux/hid-debug.h>
 #include <linux/hidraw.h>
+#include <linux/uhid.h>

 #include "hid-ids.h"
@@ -258,6 +259,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
 {
 	struct hid_report *report;
 	struct hid_field *field;
+	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
 	unsigned int usages;
 	unsigned int offset;
 	unsigned int i;
@@ -288,8 +290,11 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
 	offset = report->size;
 	report->size += parser->global.report_size * parser->global.report_count;

+	if (IS_ENABLED(CONFIG_UHID) && parser->device->ll_driver == &uhid_hid_driver)
+		max_buffer_size = UHID_DATA_MAX;
+
 	/* Total size check: Allow for possible report index byte */
-	if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) {
+	if (report->size > (max_buffer_size - 1) << 3) {
 		hid_err(parser->device, "report is too long\n");
 		return -1;
 	}
@@ -1752,6 +1757,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
 	struct hid_report_enum *report_enum = hid->report_enum + type;
 	struct hid_report *report;
 	struct hid_driver *hdrv;
+	int max_buffer_size = HID_MAX_BUFFER_SIZE;
 	unsigned int a;
 	u32 rsize, csize = size;
 	u8 *cdata = data;
@@ -1768,10 +1774,13 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
 	rsize = hid_compute_report_size(report);

-	if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
-		rsize = HID_MAX_BUFFER_SIZE - 1;
-	else if (rsize > HID_MAX_BUFFER_SIZE)
-		rsize = HID_MAX_BUFFER_SIZE;
+	if (IS_ENABLED(CONFIG_UHID) && hid->ll_driver == &uhid_hid_driver)
+		max_buffer_size = UHID_DATA_MAX;
+
+	if (report_enum->numbered && rsize >= max_buffer_size)
+		rsize = max_buffer_size - 1;
+	else if (rsize > max_buffer_size)
+		rsize = max_buffer_size;

 	if (csize < rsize) {
 		dbg_hid("report %d is too short, (%d < %d)\n", report->id,

View File

@@ -12,6 +12,7 @@
 #include <linux/kernel.h>
 #include <linux/mailbox_client.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/poll.h>
@@ -38,6 +39,7 @@ struct mbox_test_device {
 	char		*signal;
 	char		*message;
 	spinlock_t	lock;
+	struct mutex	mutex;
 	wait_queue_head_t waitq;
 	struct fasync_struct *async_queue;
 	struct dentry *root_debugfs_dir;
@@ -95,6 +97,7 @@ static ssize_t mbox_test_message_write(struct file *filp,
 				       size_t count, loff_t *ppos)
 {
 	struct mbox_test_device *tdev = filp->private_data;
+	char *message;
 	void *data;
 	int ret;

@@ -110,10 +113,13 @@ static ssize_t mbox_test_message_write(struct file *filp,
 		return -EINVAL;
 	}

-	tdev->message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL);
-	if (!tdev->message)
+	message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL);
+	if (!message)
 		return -ENOMEM;

+	mutex_lock(&tdev->mutex);
+
+	tdev->message = message;
 	ret = copy_from_user(tdev->message, userbuf, count);
 	if (ret) {
 		ret = -EFAULT;
@@ -144,6 +150,8 @@ out:
 	kfree(tdev->message);
 	tdev->signal = NULL;
+	mutex_unlock(&tdev->mutex);
+
 	return ret < 0 ? ret : count;
 }
@@ -392,6 +400,7 @@ static int mbox_test_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, tdev);

 	spin_lock_init(&tdev->lock);
+	mutex_init(&tdev->mutex);

 	if (tdev->rx_channel) {
 		tdev->rx_buffer = devm_kzalloc(&pdev->dev,
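
The shape of the fix is the standard one for adding a lock around an allocating writer: perform the GFP_KERNEL allocation before taking the mutex, then publish the pointer to the shared field under it, so concurrent writers and the free in the out: path are serialized. A generic sketch of the pattern (hypothetical struct, not the driver code):

#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

struct ctx {
	struct mutex lock;
	char *message;	/* shared: written, read and freed under lock */
};

static int write_message(struct ctx *c, const void __user *ubuf, size_t len)
{
	char *msg = kzalloc(len, GFP_KERNEL);	/* may sleep: allocate unlocked */
	int ret;

	if (!msg)
		return -ENOMEM;

	mutex_lock(&c->lock);
	c->message = msg;			/* publish under the lock */
	ret = copy_from_user(c->message, ubuf, len) ? -EFAULT : 0;
	kfree(c->message);
	c->message = NULL;
	mutex_unlock(&c->lock);
	return ret;
}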

View File

@@ -828,7 +828,7 @@ static void r592_remove(struct pci_dev *pdev)
 	/* Stop the processing thread.
 	That ensures that we won't take any more requests */
 	kthread_stop(dev->io_thread);
-
+	del_timer_sync(&dev->detect_timer);
 	r592_enable_device(dev, false);

 	while (!error && dev->req) {

View File

@@ -437,6 +437,9 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
 		goto err;
 	}
 	skb_dst_set(skb, &rt->dst);
+
+	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+
 	err = ip_local_out(net, skb->sk, skb);
 	if (unlikely(net_xmit_eval(err)))
 		dev->stats.tx_errors++;
@@ -475,6 +478,9 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
 		goto err;
 	}
 	skb_dst_set(skb, dst);
+
+	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+
 	err = ip6_local_out(net, skb->sk, skb);
 	if (unlikely(net_xmit_eval(err)))
 		dev->stats.tx_errors++;
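
IPCB() and IP6CB() are just typed views of the skb->cb scratch area, which the previous protocol layer may have left populated; zeroing it keeps the IPv4/IPv6 output paths from interpreting stale bytes as their own control data. From the networking headers, abridged:

/* include/net/ip.h and include/linux/ipv6.h, abridged */
#define IPCB(skb)  ((struct inet_skb_parm *)((skb)->cb))
#define IP6CB(skb) ((struct inet6_skb_parm *)((skb)->cb))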

View File

@@ -180,9 +180,12 @@ static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx)
 	else
 		min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth32);

-	max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
-	if (max == 0)
+	if (le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) == 0)
 		max = CDC_NCM_NTB_MAX_SIZE_TX; /* dwNtbOutMaxSize not set */
+	else
+		max = clamp_t(u32, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize),
+			      USB_CDC_NCM_NTB_MIN_OUT_SIZE,
+			      CDC_NCM_NTB_MAX_SIZE_TX);

 	/* some devices set dwNtbOutMaxSize too low for the above default */
 	min = min(min, max);
@@ -1230,6 +1233,9 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
 			 * further.
 			 */
 			if (skb_out == NULL) {
+				/* If even the smallest allocation fails, abort. */
+				if (ctx->tx_curr_size == USB_CDC_NCM_NTB_MIN_OUT_SIZE)
+					goto alloc_failed;
 				ctx->tx_low_mem_max_cnt = min(ctx->tx_low_mem_max_cnt + 1,
 					(unsigned)CDC_NCM_LOW_MEM_MAX_CNT);
 				ctx->tx_low_mem_val = ctx->tx_low_mem_max_cnt;
@@ -1248,13 +1254,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
 			skb_out = alloc_skb(ctx->tx_curr_size, GFP_ATOMIC);

 			/* No allocation possible so we will abort */
-			if (skb_out == NULL) {
-				if (skb != NULL) {
-					dev_kfree_skb_any(skb);
-					dev->net->stats.tx_dropped++;
-				}
-				goto exit_no_skb;
-			}
+			if (!skb_out)
+				goto alloc_failed;
 			ctx->tx_low_mem_val--;
 		}
 	if (ctx->is_ndp16) {
@@ -1447,6 +1448,11 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
 	return skb_out;

+alloc_failed:
+	if (skb) {
+		dev_kfree_skb_any(skb);
+		dev->net->stats.tx_dropped++;
+	}
 exit_no_skb:
 	/* Start timer, if there is a remaining non-empty skb */
 	if (ctx->tx_curr_skb != NULL && n > 0)
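
Distilled, the new sizing rule treats a zero dwNtbOutMaxSize as "not set" and pulls every other value into the range the driver can actually handle, so a too-low device value can no longer shrink the transmit NTB below the legal minimum. A sketch (hypothetical helper, constants as in the driver):

static u32 ncm_tx_max(u32 dw_ntb_out_max_size)
{
	if (dw_ntb_out_max_size == 0)		/* not set by the device */
		return CDC_NCM_NTB_MAX_SIZE_TX;

	/* too-low and too-high values are both clamped into range */
	return clamp_t(u32, dw_ntb_out_max_size,
		       USB_CDC_NCM_NTB_MIN_OUT_SIZE, CDC_NCM_NTB_MAX_SIZE_TX);
}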

View File

@@ -1077,6 +1077,8 @@ static int rkvdec_remove(struct platform_device *pdev)
 {
 	struct rkvdec_dev *rkvdec = platform_get_drvdata(pdev);

+	cancel_delayed_work_sync(&rkvdec->watchdog_work);
+
 	rkvdec_v4l2_cleanup(rkvdec);
 	pm_runtime_disable(&pdev->dev);
 	pm_runtime_dont_use_autosuspend(&pdev->dev);

View File

@@ -2604,13 +2604,16 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
 		 * device-initiated disconnect requires a core soft reset
 		 * (DCTL.CSftRst) before enabling the run/stop bit.
 		 */
-		dwc3_core_soft_reset(dwc);
+		ret = dwc3_core_soft_reset(dwc);
+		if (ret)
+			goto done;

 		dwc3_event_buffers_setup(dwc);
 		__dwc3_gadget_start(dwc);
 		ret = dwc3_gadget_run_stop(dwc, true, false);
 	}

+done:
 	pm_runtime_put(dwc->dev);

 	return ret;

View File

@@ -3619,6 +3619,7 @@ static void ffs_func_unbind(struct usb_configuration *c,
 	/* Drain any pending AIO completions */
 	drain_workqueue(ffs->io_completion_wq);

+	ffs_event_add(ffs, FUNCTIONFS_UNBIND);
 	if (!--opts->refcnt)
 		functionfs_unbind(ffs);
@@ -3643,7 +3644,6 @@ static void ffs_func_unbind(struct usb_configuration *c,
 	func->function.ssp_descriptors = NULL;
 	func->interfaces_nums = NULL;

-	ffs_event_add(ffs, FUNCTIONFS_UNBIND);
 }

 static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
static struct usb_function *ffs_alloc(struct usb_function_instance *fi)

View File

@@ -2568,6 +2568,7 @@ static int renesas_usb3_remove(struct platform_device *pdev)
 	debugfs_remove_recursive(usb3->dentry);
 	device_remove_file(&pdev->dev, &dev_attr_role);

+	cancel_work_sync(&usb3->role_work);
 	usb_role_switch_unregister(usb3->role_sw);

 	usb_del_gadget_udc(&usb3->gadget);

View File

@@ -3270,8 +3270,12 @@ int prepare_to_relocate(struct reloc_control *rc)
 		 */
 		return PTR_ERR(trans);
 	}
-	btrfs_commit_transaction(trans);
-	return 0;
+
+	ret = btrfs_commit_transaction(trans);
+	if (ret)
+		unset_reloc_control(rc);
+
+	return ret;
 }

 static noinline_for_stack int relocate_block_group(struct reloc_control *rc)

View File

@@ -829,6 +829,8 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
 	bool found = false;
 	struct bio *target = bio ? *bio : NULL;

+	f2fs_bug_on(sbi, !target && !page);
+
 	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
 		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
 		struct list_head *head = &io->bio_list;
@@ -2875,7 +2877,8 @@ out:
 	if (unlikely(f2fs_cp_error(sbi))) {
 		f2fs_submit_merged_write(sbi, DATA);
-		f2fs_submit_merged_ipu_write(sbi, bio, NULL);
+		if (bio && *bio)
+			f2fs_submit_merged_ipu_write(sbi, bio, NULL);
 		submitted = NULL;
 	}

View File

@@ -7,10 +7,9 @@
 #include <linux/device.h>
 #include <linux/fs.h>
 #include <linux/uaccess.h>
-#include <linux/rtmutex.h>
 #include "internal.h"

-static DEFINE_RT_MUTEX(pmsg_lock);
+static DEFINE_MUTEX(pmsg_lock);

 static ssize_t write_pmsg(struct file *file, const char __user *buf,
 			  size_t count, loff_t *ppos)
@@ -29,9 +28,9 @@ static ssize_t write_pmsg(struct file *file, const char __user *buf,
 	if (!access_ok(buf, count))
 		return -EFAULT;

-	rt_mutex_lock(&pmsg_lock);
+	mutex_lock(&pmsg_lock);
 	ret = psinfo->write_user(&record, buf);
-	rt_mutex_unlock(&pmsg_lock);
+	mutex_unlock(&pmsg_lock);

 	return ret ? ret : count;
 }

View File

@@ -924,6 +924,16 @@ xlog_recover_buf_commit_pass2(
 	if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
 		trace_xfs_log_recover_buf_skip(log, buf_f);
 		xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
+
+		/*
+		 * We're skipping replay of this buffer log item due to the log
+		 * item LSN being behind the ondisk buffer. Verify the buffer
+		 * contents since we aren't going to run the write verifier.
+		 */
+		if (bp->b_ops) {
+			bp->b_ops->verify_read(bp);
+			error = bp->b_error;
+		}
+
 		goto out_release;
 	}

View File

@@ -107,7 +107,6 @@ static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
 static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
 {
-	_trace_android_vh_record_pcpu_rwsem_starttime(current, 0);
 	rwsem_release(&sem->dep_map, _RET_IP_);

 	preempt_disable();
@@ -130,6 +129,7 @@ static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
 		this_cpu_dec(*sem->read_count);
 		rcuwait_wake_up(&sem->writer);
 	}
+	_trace_android_vh_record_pcpu_rwsem_starttime(current, 0);
 	preempt_enable();
 }

View File

@@ -192,6 +192,27 @@ DECLARE_HOOK(android_vh_mark_page_accessed,
 DECLARE_HOOK(android_vh_cma_drain_all_pages_bypass,
 	TP_PROTO(unsigned int migratetype, bool *bypass),
 	TP_ARGS(migratetype, bypass));
+DECLARE_HOOK(android_vh_free_unref_page_bypass,
+	TP_PROTO(struct page *page, int order, int migratetype, bool *bypass),
+	TP_ARGS(page, order, migratetype, bypass));
+DECLARE_HOOK(android_vh_kvmalloc_node_use_vmalloc,
+	TP_PROTO(size_t size, gfp_t *kmalloc_flags, bool *use_vmalloc),
+	TP_ARGS(size, kmalloc_flags, use_vmalloc));
+DECLARE_HOOK(android_vh_should_alloc_pages_retry,
+	TP_PROTO(gfp_t gfp_mask, int order, int *alloc_flags,
+	int migratetype, struct zone *preferred_zone, struct page **page, bool *should_alloc_retry),
+	TP_ARGS(gfp_mask, order, alloc_flags,
+	migratetype, preferred_zone, page, should_alloc_retry));
+DECLARE_HOOK(android_vh_unreserve_highatomic_bypass,
+	TP_PROTO(bool force, struct zone *zone, bool *skip_unreserve_highatomic),
+	TP_ARGS(force, zone, skip_unreserve_highatomic));
+DECLARE_HOOK(android_vh_pageset_update,
+	TP_PROTO(unsigned long *high, unsigned long *batch),
+	TP_ARGS(high, batch));
+DECLARE_HOOK(android_vh_rmqueue_bulk_bypass,
+	TP_PROTO(unsigned int order, struct per_cpu_pages *pcp, int migratetype,
+	struct list_head *list),
+	TP_ARGS(order, pcp, migratetype, list));
 DECLARE_HOOK(android_vh_pcplist_add_cma_pages_bypass,
 	TP_PROTO(int migratetype, bool *bypass),
 	TP_ARGS(migratetype, bypass));
@@ -201,6 +222,10 @@ DECLARE_HOOK(android_vh_subpage_dma_contig_alloc,
 DECLARE_HOOK(android_vh_ra_tuning_max_page,
 	TP_PROTO(struct readahead_control *ractl, unsigned long *max_page),
 	TP_ARGS(ractl, max_page));
+DECLARE_HOOK(android_vh_tune_mmap_readaround,
+	TP_PROTO(unsigned int ra_pages, pgoff_t pgoff,
+	pgoff_t *start, unsigned int *size, unsigned int *async_size),
+	TP_ARGS(ra_pages, pgoff, start, size, async_size));
 DECLARE_RESTRICTED_HOOK(android_rvh_handle_pte_fault_end,
	TP_PROTO(struct vm_fault *vmf, unsigned long highest_memmap_pfn),
	TP_ARGS(vmf, highest_memmap_pfn), 1);
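
Each DECLARE_HOOK expands to an ordinary tracepoint named android_vh_<name>, so a vendor module attaches a probe whose first parameter is the registration cookie, followed by the TP_PROTO arguments. A minimal sketch for the new readahead hook (the halving policy is made up for illustration):

#include <linux/module.h>
#include <trace/hooks/mm.h>

static void probe_tune_mmap_readaround(void *data, unsigned int ra_pages,
				       pgoff_t pgoff, pgoff_t *start,
				       unsigned int *size, unsigned int *async_size)
{
	/* example policy: cap the readaround window at half of ra_pages */
	if (*size > ra_pages / 2) {
		*size = ra_pages / 2;
		*async_size = *size / 4;
	}
}

static int __init vh_example_init(void)
{
	return register_trace_android_vh_tune_mmap_readaround(
			probe_tune_mmap_readaround, NULL);
}
module_init(vh_example_init);
MODULE_LICENSE("GPL");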

View File

@@ -5976,6 +5976,8 @@ static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_kiocb *preq;
 	int ret2, ret = 0;

+	io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
+
 	spin_lock(&ctx->completion_lock);
 	preq = io_poll_find(ctx, req->poll_update.old_user_data, true);
 	if (!preq || !io_poll_disarm(preq)) {
@@ -6007,6 +6009,7 @@ out:
 		req_set_fail(req);

 	/* complete update request, we're done with it */
 	io_req_complete(req, ret);
+	io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
 	return 0;
 }

View File

@@ -4219,6 +4219,7 @@ int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
 		cft->flags |= __CFTYPE_ONLY_ON_DFL;
 	return cgroup_add_cftypes(ss, cfts);
 }
+EXPORT_SYMBOL_GPL(cgroup_add_dfl_cftypes);

 /**
  * cgroup_add_legacy_cftypes - add an array of cftypes for legacy hierarchies

View File

@@ -760,12 +760,12 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
  */
 void __sched mutex_unlock(struct mutex *lock)
 {
-	trace_android_vh_record_mutex_lock_starttime(current, 0);
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
 	if (__mutex_unlock_fast(lock))
 		return;
 #endif
 	__mutex_unlock_slowpath(lock, _RET_IP_);
+	trace_android_vh_record_mutex_lock_starttime(current, 0);
 }
 EXPORT_SYMBOL(mutex_unlock);

View File

@@ -258,7 +258,6 @@ EXPORT_SYMBOL_GPL(percpu_down_write);
 void percpu_up_write(struct percpu_rw_semaphore *sem)
 {
-	trace_android_vh_record_pcpu_rwsem_starttime(current, 0);
 	rwsem_release(&sem->dep_map, _RET_IP_);

 	/*
@@ -284,6 +283,7 @@ void percpu_up_write(struct percpu_rw_semaphore *sem)
 	 * exclusive write lock because its counting.
 	 */
 	rcu_sync_exit(&sem->rss);
+	trace_android_vh_record_pcpu_rwsem_starttime(current, 0);
 }
 EXPORT_SYMBOL_GPL(percpu_up_write);

View File

@@ -1077,7 +1077,8 @@ static size_t relay_file_read_start_pos(struct rchan_buf *buf)
 	size_t subbuf_size = buf->chan->subbuf_size;
 	size_t n_subbufs = buf->chan->n_subbufs;
 	size_t consumed = buf->subbufs_consumed % n_subbufs;
-	size_t read_pos = consumed * subbuf_size + buf->bytes_consumed;
+	size_t read_pos = (consumed * subbuf_size + buf->bytes_consumed)
+			  % (n_subbufs * subbuf_size);

 	read_subbuf = read_pos / subbuf_size;
 	padding = buf->padding[read_subbuf];
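
The modulo matters when a reader has consumed the ring exactly once around. For instance, with subbuf_size = 4096 and n_subbufs = 4, consumed = 3 and bytes_consumed = 4096 used to put read_pos one past the end of the buffer, so read_subbuf indexed past the end of the padding[] array:

	size_t subbuf_size = 4096, n_subbufs = 4;
	size_t consumed = 3, bytes_consumed = 4096;

	size_t old_pos = consumed * subbuf_size + bytes_consumed;	/* 16384 */
	size_t new_pos = old_pos % (n_subbufs * subbuf_size);		/* 0 */
	/* old: padding[16384 / 4096] = padding[4] is out of bounds;
	 * new: padding[0] is valid. */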

View File

@@ -2661,6 +2661,8 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 	ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
 	ra->size = ra->ra_pages;
 	ra->async_size = ra->ra_pages / 4;
+	trace_android_vh_tune_mmap_readaround(ra->ra_pages, vmf->pgoff,
+			&ra->start, &ra->size, &ra->async_size);
 	ractl._index = ra->start;
 	do_page_cache_ra(&ractl, ra->size, ra->async_size);
 	return fpin;

View File

@@ -246,6 +246,16 @@ static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
 			   unsigned long addr)
 {
 	pgtable_t token = pmd_pgtable(*pmd);
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	/*
+	 * Ensure page table destruction is blocked if __pte_map_lock managed
+	 * to take this lock. Without this barrier tlb_remove_table_rcu can
+	 * destroy ptl after __pte_map_lock locked it and during unlock would
+	 * cause a use-after-free.
+	 */
+	spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
+	spin_unlock(ptl);
+#endif
 	pmd_clear(pmd);
 	pte_free_tlb(tlb, token, addr);
 	mm_dec_nr_ptes(tlb->mm);
@@ -2627,9 +2637,7 @@ EXPORT_SYMBOL_GPL(apply_to_page_range);
 static bool pte_spinlock(struct vm_fault *vmf)
 {
 	bool ret = false;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	pmd_t pmdval;
-#endif

 	/* Check if vma is still valid */
 	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
@@ -2644,24 +2652,28 @@ static bool pte_spinlock(struct vm_fault *vmf)
 			goto out;
 	}

-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	/*
 	 * We check if the pmd value is still the same to ensure that there
 	 * is not a huge collapse operation in progress in our back.
+	 * It also ensures that pmd was not cleared by pmd_clear in
+	 * free_pte_range and ptl is still valid.
 	 */
 	pmdval = READ_ONCE(*vmf->pmd);
 	if (!pmd_same(pmdval, vmf->orig_pmd)) {
 		trace_spf_pmd_changed(_RET_IP_, vmf->vma, vmf->address);
 		goto out;
 	}
-#endif

-	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
+	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, &pmdval);
 	if (unlikely(!spin_trylock(vmf->ptl))) {
 		trace_spf_pte_lock(_RET_IP_, vmf->vma, vmf->address);
 		goto out;
 	}

+	/*
+	 * The check below will fail if pte_spinlock passed its ptl barrier
+	 * before we took the ptl lock.
+	 */
 	if (vma_has_changed(vmf)) {
 		spin_unlock(vmf->ptl);
 		trace_spf_vma_changed(_RET_IP_, vmf->vma, vmf->address);
@@ -2679,9 +2691,7 @@ static bool __pte_map_lock_speculative(struct vm_fault *vmf, unsigned long addr)
 	bool ret = false;
 	pte_t *pte;
 	spinlock_t *ptl;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	pmd_t pmdval;
-#endif

 	/*
 	 * The first vma_has_changed() guarantees the page-tables are still
@@ -2696,7 +2706,6 @@ static bool __pte_map_lock_speculative(struct vm_fault *vmf, unsigned long addr)
 		goto out;
 	}

-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	/*
 	 * We check if the pmd value is still the same to ensure that there
 	 * is not a huge collapse operation in progress in our back.
@@ -2706,7 +2715,6 @@ static bool __pte_map_lock_speculative(struct vm_fault *vmf, unsigned long addr)
 		trace_spf_pmd_changed(_RET_IP_, vmf->vma, addr);
 		goto out;
 	}
-#endif

 	/*
 	 * Same as pte_offset_map_lock() except that we call
@@ -2715,14 +2723,18 @@ static bool __pte_map_lock_speculative(struct vm_fault *vmf, unsigned long addr)
 	 * to invalidate TLB but this CPU has irq disabled.
 	 * Since we are in a speculative patch, accept it could fail
 	 */
-	ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
-	pte = pte_offset_map(vmf->pmd, addr);
+	ptl = pte_lockptr(vmf->vma->vm_mm, &pmdval);
+	pte = pte_offset_map(&pmdval, addr);
 	if (unlikely(!spin_trylock(ptl))) {
 		pte_unmap(pte);
 		trace_spf_pte_lock(_RET_IP_, vmf->vma, addr);
 		goto out;
 	}

+	/*
+	 * The check below will fail if __pte_map_lock_speculative passed its ptl
+	 * barrier before we took the ptl lock.
+	 */
 	if (vma_has_changed(vmf)) {
 		pte_unmap_unlock(pte, ptl);
 		trace_spf_vma_changed(_RET_IP_, vmf->vma, addr);
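
The pairing above is a lock-as-barrier idiom: the destruction side takes and immediately drops the PTE lock so that any speculative walker that already won the trylock finishes first, while the walker revalidates after locking to catch the case where the barrier won the race. Schematically (a sketch using names from the diff, not compilable kernel code):

	/* Destruction side (free_pte_range): lock/unlock as a barrier. */
	spinlock_t *ptl = pmd_lock(mm, pmd);
	spin_unlock(ptl);	/* waits out any walker holding ptl */
	pmd_clear(pmd);		/* only now is the page table torn down */

	/* Speculative side (__pte_map_lock): trylock, then revalidate. */
	if (!spin_trylock(ptl))
		goto bail;
	if (!pmd_same(READ_ONCE(*pmd), orig_pmd) || vma_has_changed(vmf))
		goto bail_unlock;	/* the barrier won; retry non-speculatively */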

View File

@@ -1608,11 +1608,16 @@ static void __free_pages_ok(struct page *page, unsigned int order,
 	unsigned long flags;
 	int migratetype;
 	unsigned long pfn = page_to_pfn(page);
+	bool skip_free_unref_page = false;

 	if (!free_pages_prepare(page, order, true, fpi_flags))
 		return;

 	migratetype = get_pfnblock_migratetype(page, pfn);
+	trace_android_vh_free_unref_page_bypass(page, order, migratetype, &skip_free_unref_page);
+	if (skip_free_unref_page)
+		return;
+
 	local_irq_save(flags);
 	__count_vm_events(PGFREE, 1 << order);
 	free_one_page(page_zone(page), page, pfn, order, migratetype,
@@ -2791,6 +2796,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 	struct page *page;
 	int order;
 	bool ret;
+	bool skip_unreserve_highatomic = false;

 	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
 								ac->nodemask) {
@@ -2802,6 +2808,11 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 		    pageblock_nr_pages)
 			continue;

+		trace_android_vh_unreserve_highatomic_bypass(force, zone,
+				&skip_unreserve_highatomic);
+		if (skip_unreserve_highatomic)
+			continue;
+
 		spin_lock_irqsave(&zone->lock, flags);
 		for (order = 0; order < MAX_ORDER; order++) {
 			struct free_area *area = &(zone->free_area[order]);
@@ -3047,6 +3058,10 @@ static struct list_head *get_populated_pcp_list(struct zone *zone,
 	struct list_head *list = &pcp->lists[migratetype];

 	if (list_empty(list)) {
+		trace_android_vh_rmqueue_bulk_bypass(order, pcp, migratetype, list);
+		if (!list_empty(list))
+			return list;
+
 		pcp->count += rmqueue_bulk(zone, order,
 				pcp->batch, list,
 				migratetype, alloc_flags);
@@ -3343,10 +3358,17 @@ void free_unref_page(struct page *page)
 {
 	unsigned long flags;
 	unsigned long pfn = page_to_pfn(page);
+	int migratetype;
+	bool skip_free_unref_page = false;

 	if (!free_unref_page_prepare(page, pfn))
 		return;

+	migratetype = get_pfnblock_migratetype(page, pfn);
+	trace_android_vh_free_unref_page_bypass(page, 0, migratetype, &skip_free_unref_page);
+	if (skip_free_unref_page)
+		return;
+
 	local_irq_save(flags);
 	free_unref_page_commit(page, pfn);
 	local_irq_restore(flags);
@@ -4822,6 +4844,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	unsigned int zonelist_iter_cookie;
 	int reserve_flags;
 	unsigned long vh_record;
+	bool should_alloc_retry = false;

 	trace_android_vh_alloc_pages_slowpath_begin(gfp_mask, order, &vh_record);
 	/*
@@ -4962,6 +4985,12 @@ retry:
 	if (page)
 		goto got_pg;

+	trace_android_vh_should_alloc_pages_retry(gfp_mask, order,
+		&alloc_flags, ac->migratetype, ac->preferred_zoneref->zone,
+		&page, &should_alloc_retry);
+	if (should_alloc_retry)
+		goto retry;
+
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
 							&did_some_progress);
@@ -6604,6 +6633,7 @@ static int zone_batchsize(struct zone *zone)
 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
 		unsigned long batch)
 {
+	trace_android_vh_pageset_update(&high, &batch);
 	/* start with a fail safe value for batch */
 	pcp->batch = 1;
 	smp_wmb();
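
All the *_bypass hooks above share one contract: the caller initializes the bool to false, the probe may flip it, and the kernel then returns early. A sketch of a probe for the free path (the page-pool policy and pool_try_capture() are hypothetical vendor code, not part of this commit):

#include <trace/hooks/mm.h>

static bool pool_try_capture(struct page *page);	/* assumed vendor helper */

static void probe_free_unref_page_bypass(void *data, struct page *page,
					 int order, int migratetype, bool *bypass)
{
	/* example policy: claim order-0 movable pages for a vendor page pool */
	if (order == 0 && migratetype == MIGRATE_MOVABLE)
		*bypass = pool_try_capture(page);
}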

View File

@@ -29,6 +29,7 @@
 #include "internal.h"
 #ifndef __GENKSYMS__
 #include <trace/hooks/syscall_check.h>
+#include <trace/hooks/mm.h>
 #endif

 /**
@@ -587,6 +588,7 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
 {
 	gfp_t kmalloc_flags = flags;
 	void *ret;
+	bool use_vmalloc = false;

 	/*
 	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
@@ -595,6 +597,10 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
 	if ((flags & GFP_KERNEL) != GFP_KERNEL)
 		return kmalloc_node(size, flags, node);

+	trace_android_vh_kvmalloc_node_use_vmalloc(size, &kmalloc_flags, &use_vmalloc);
+	if (use_vmalloc)
+		goto use_vmalloc_node;
+
 	/*
 	 * We want to attempt a large physically contiguous block first because
 	 * it is less likely to fragment multiple larger blocks and therefore
@@ -624,6 +630,7 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
 		return NULL;
 	}

+use_vmalloc_node:
 	return __vmalloc_node(size, 1, flags, node,
 			__builtin_return_address(0));
 }
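
With the hook in place, kvmalloc_node() has three exits: straight kmalloc for non-GFP_KERNEL flags (vmalloc cannot honor them), the vendor-forced vmalloc path, and the usual try-kmalloc-then-fall-back-to-vmalloc flow. Distilled control flow (a simplified sketch; the real code also tweaks kmalloc_flags for the first attempt):

void *kvmalloc_node_sketch(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	bool use_vmalloc = false;
	void *ret;

	if ((flags & GFP_KERNEL) != GFP_KERNEL)
		return kmalloc_node(size, flags, node);	/* can't vmalloc */

	trace_android_vh_kvmalloc_node_use_vmalloc(size, &kmalloc_flags, &use_vmalloc);
	if (!use_vmalloc) {
		ret = kmalloc_node(size, kmalloc_flags, node);	/* fast path */
		if (ret || size <= PAGE_SIZE)
			return ret;
	}
	return __vmalloc_node(size, 1, flags, node, __builtin_return_address(0));
}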

View File

@@ -300,6 +300,10 @@ static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
 	write_unlock(&xen_9pfs_lock);

 	for (i = 0; i < priv->num_rings; i++) {
+		struct xen_9pfs_dataring *ring = &priv->rings[i];
+
+		cancel_work_sync(&ring->work);
+
 		if (!priv->rings[i].intf)
 			break;
 		if (priv->rings[i].irq > 0)

View File

@@ -996,7 +996,14 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
 	if (hci_sock_gen_cookie(sk)) {
 		struct sk_buff *skb;

-		if (capable(CAP_NET_ADMIN))
+		/* Perform careful checks before setting the HCI_SOCK_TRUSTED
+		 * flag. Make sure that not only the current task but also
+		 * the socket opener has the required capability, since
+		 * privileged programs can be tricked into making ioctl calls
+		 * on HCI sockets, and the socket should not be marked as
+		 * trusted simply because the ioctl caller is privileged.
+		 */
+		if (sk_capable(sk, CAP_NET_ADMIN))
 			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

 		/* Send event to monitor */
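
sk_capable() is stricter than capable(): it requires the capability both in the current task's credentials and in the credentials recorded on the socket's file when it was opened, which is exactly the "socket opener" property the comment describes. Abridged from net/core/sock.c (paraphrased from memory; check the tree):

/* net/core/sock.c, abridged */
bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap)
{
	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
		ns_capable(user_ns, cap);
}

bool sk_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, &init_user_ns, cap);
}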

View File

@@ -1086,6 +1086,9 @@ static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
 	if (option_len > sizeof(struct geneve_opt))
 		data_len = option_len - sizeof(struct geneve_opt);

+	if (key->enc_opts.len > FLOW_DIS_TUN_OPTS_MAX - 4)
+		return -ERANGE;
+
 	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
 	memset(opt, 0xff, option_len);
 	opt->length = data_len / 4;

View File

@@ -716,12 +716,18 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
 			 struct nlattr *est, bool ovr,
 			 struct netlink_ext_ack *extack)
 {
-	int err;
+	int err, ifindex = -1;

 	err = tcf_exts_validate(net, tp, tb, est, &n->exts, ovr, true, extack);
 	if (err < 0)
 		return err;

+	if (tb[TCA_U32_INDEV]) {
+		ifindex = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
+		if (ifindex < 0)
+			return -EINVAL;
+	}
+
 	if (tb[TCA_U32_LINK]) {
 		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
 		struct tc_u_hnode *ht_down = NULL, *ht_old;
@@ -756,13 +762,9 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
 		tcf_bind_filter(tp, &n->res, base);
 	}

-	if (tb[TCA_U32_INDEV]) {
-		int ret;
-		ret = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
-		if (ret < 0)
-			return -EINVAL;
-		n->ifindex = ret;
-	}
+	if (ifindex >= 0)
+		n->ifindex = ifindex;
+
 	return 0;
 }
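
The bug class here is returning from the middle of an update: the old code resolved TCA_U32_INDEV only after tcf_bind_filter() had already mutated state, so a bad ifindex left the filter half-updated. The fix is the usual parse-then-commit shape, sketched generically (types and helper hypothetical):

struct parms { int ifindex; };

static int resolve_ifindex(const char *name);	/* assumed helper */

static int set_parms(struct parms *p, const char *indev_attr)
{
	int ifindex = -1;

	if (indev_attr) {
		ifindex = resolve_ifindex(indev_attr);	/* validate first */
		if (ifindex < 0)
			return -EINVAL;			/* nothing mutated yet */
	}

	/* ... all other fallible validation goes here ... */

	if (ifindex >= 0)
		p->ifindex = ifindex;			/* commit last */
	return 0;
}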