Merge branch 'android13-5.15' into branch 'android13-5.15-lts'

This is a merge of the android13-5.15 branch into android13-5.15-lts to
catch up with the many changes that have happened there.

It contains the following commits:

*   6e221c28b0 Merge "Merge tag 'android13-5.15.119_r00' into android13-5.15" into android13-5.15
|\
| * 76a6390cf6 Merge tag 'android13-5.15.119_r00' into android13-5.15
* | 3a4b0ffd38 UPSTREAM: media: dvb-core: Fix kernel WARNING for blocking operation in wait_event*()
* | 1bff58d800 ANDROID: abi_gki_aarch64_qcom: update abi
* | aa25f16d54 UPSTREAM: ASoC: soc-pcm: Fix DPCM lockdep warning due to nested stream locks
* | 1fe4e6a16a UPSTREAM: ASoC: soc-pcm.c: call __soc_pcm_close() in soc_pcm_close()
* | 553517df16 UPSTREAM: ASoC: soc-pcm: Move debugfs removal out of spinlock
* | 8930f00e36 ANDROID: GKI: Update the ABI for ASoC fixes
* | 012e578723 BACKPORT: ASoC: soc-pcm: fix BE handling of PAUSE_RELEASE
* | fc213aa3a6 BACKPORT: ASoC: soc-pcm: test refcount before triggering
* | cdaafbf2de UPSTREAM: ASoC: soc-pcm: serialize BE triggers
* | a64a5028e9 ANDROID: GKI: Update the ABI xml
* | bccc1a4be8 BACKPORT: ASoC: soc-pcm: Fix and cleanup DPCM locking
* | 957ddd4381 ANDROID: ABI: Update pixel symbol list
* | fff5759ca8 ANDROID: ABI: update gki symbol list
* | 79353327f1 ANDROID: Add vendor hook for cma adjusting
* | dbd50f6113 UPSTREAM: squashfs: always build "file direct" version of page actor
* | 70b4d2cec4 UPSTREAM: squashfs: fix cache race with migration
* | b3247f739a BACKPORT: squashfs: cache partial compressed blocks
* | 51dde019a5 UPSTREAM: squashfs: fix buffer release race condition in readahead code
* | 9d967debc7 UPSTREAM: squashfs: fix extending readahead beyond end of file
* | c194120949 UPSTREAM: squashfs: fix read regression introduced in readahead code
* | d35f75a39f UPSTREAM: squashfs: don't call kmalloc in decompressors
* | 357f4067df UPSTREAM: squashfs: don't use intermediate buffer if pages missing
* | 2004e9c6f8 UPSTREAM: squashfs: extend "page actor" to handle missing pages
* | 83db775f93 UPSTREAM: squashfs: support reading fragments in readahead call
* | 85a9a0f602 UPSTREAM: squashfs: implement readahead
* | ae5b9259d4 FROMLIST: fuse: revalidate: don't invalidate if interrupted
* | 982656a617 UPSTREAM: gfs2: Don't deref jdesc in evict
* | 0050e63d6c ANDROID: ABI: Update symbols to unisoc whitelist
* | 8dd49c1005 ANDROID: Incremental fs: Allocate data buffer based on input request size
* | c326542e5d ANDROID: GKI: Update symbol list for Amlogic
* | 7364bcf623 ANDROID: KVM: arm64: Fix MMU context save/restore over TLB invalidation
* | ab27b98371 Revert "ANDROID: mm/slab: Create 64-byte slab caches if the hardware supports it"
* | 4b24277c7b UPSTREAM: wifi: cfg80211: fix link del callback to call correct handler
* | e4743dd009 UPSTREAM: wifi: cfg80211: reject bad AP MLD address
* | 05acbbe287 UPSTREAM: KVM: arm64: Populate fault info for watchpoint
* | 106014fc6b UPSTREAM: media: pvrusb2: fix DVB_CORE dependency
* | c91d54a43e BACKPORT: iommu/mediatek: Set dma_mask for PGTABLE_PA_35_EN
* | 2e5c9d754f UPSTREAM: Revert "kasan: drop skip_kasan_poison variable in free_pages_prepare"
* | 0bf333f6c7 UPSTREAM: wifi: cfg80211: fix MLO connection ownership
* | d809ff52f8 UPSTREAM: wifi: nl80211: fix NULL-ptr deref in offchan check
* | 5328327822 ANDROID: vendor_hooks: Supplement the missing hook call point.
* | 7b71c15649 ANDROID: GKI: add symbol list for Mediatek
* | 84d2796ecd ANDROID: devlink: Select NET_DEVLINK in Kconfig.gki
* | aa7bd5a808 ANDROID: GKI: Update symbols to symbol list
* | 024628cc92 ANDROID: vendor_hook: Add hook to abort reclaim and compaction
* | 9d47ecd070 ANDROID: add a new symbol to symbol list for unisoc
* | d75757f165 ANDROID: mm: Add a vendor hook for slab page alloced checking
|/
* 692292011e ANDROID: GKI: Update symbol list for sunxi
* 1f8c32b161 ANDROID: ABI: update gki symbol list
* a03eac4b77 ANDROID: add new vender hook to fix bug
* c2a56c48f9 UPSTREAM: tick/broadcast: Make broadcast device replacement work correctly
* 3e28748194 UPSTREAM: usb: dwc3: gadget: Return -ESHUTDOWN on ep disable
* 70c18ca7e0 ANDROID: gki_config: enable CONFIG_RELAY
* 10a17dca3d BACKPORT: cgroup/cpuset: Free DL BW in case can_attach() fails
* 64668f1e23 BACKPORT: sched/deadline: Create DL BW alloc, free & check overflow interface
* bba2dd990d FROMGIT: cgroup/cpuset: Iterate only if DEADLINE tasks are present
* b62781fbe5 BACKPORT: sched/cpuset: Keep track of SCHED_DEADLINE task in cpusets
* 723ec5bf9e BACKPORT: sched/cpuset: Bring back cpuset_mutex
* 2acbd62dbc FROMGIT: cgroup/cpuset: Rename functions dealing with DEADLINE accounting
* 2c074c640c UPSTREAM: cgroup/cpuset: Wake up cpuset_attach_wq tasks in cpuset_cancel_attach()
* ae103a8923 UPSTREAM: relayfs: fix out-of-bounds access in relay_file_read
* 2592230918 UPSTREAM: usb: gadget: udc: renesas_usb3: Fix use after free bug in renesas_usb3_remove due to race condition
* 51ccf386ec ANDROID: GKI: Update symbols to symbol list
* 862230c346 ANDROID: vendor_hook: Add hook to tune readaround size
* dd8ee9a8ab ANDROID: add for tuning readahead size
* aa47cc7c20 ANDROID: vendor_hooks: Add hooks to avoid key threads stalled in memory allocations
* 5b7049340a UPSTREAM: io_uring: hold uring mutex around poll removal
* 1cd4d4fea4 Revert "FROMLIST: [PATCH v2] tick/broadcast: Do not set oneshot_mask except was_periodic was true"
* ea6c11624c UPSTREAM: x86/mm: Avoid using set_pgd() outside of real PGD pages
* 492837d7f9 UPSTREAM: netfilter: nf_tables: incorrect error path handling with NFT_MSG_NEWRULE
* 2ed4bff730 ANDROID: GKI: Update symbol list for Amlogic
* 5603b75357 UPSTREAM: media: rkvdec: fix use after free bug in rkvdec_remove
* a68e7f0e6f UPSTREAM: net/sched: flower: fix possible OOB write in fl_set_geneve_opt()
* 99ad7cb47e UPSTREAM: f2fs: fix to avoid use-after-free for cached IPU bio
* a77c2b7ef7 UPSTREAM: ASoC: soc-pcm: align BE 'atomicity' with that of the FE
* fc49127a80 UPSTREAM: ipvlan:Fix out-of-bounds caused by unclear skb->cb
* 9a54f9ba70 UPSTREAM: ASoC: soc-pcm: use GFP_ATOMIC for dpcm structure
* 81eae26664 UPSTREAM: Revert "Fix XFRM-I support for nested ESP tunnels"
* 0bae509c87 UPSTREAM: net/sched: cls_u32: Fix reference counter leak leading to overflow
* 6d0374605a ANDROID: GKI: Update symbol list for Amlogic
* 886679684e ANDROID: ABI: Update oplus symbol list
* 45ad9590eb ANDROID: Export memcg functions to allow module to add new files
* ba2ca1ed90 ANDROID: Update the ABI representation
* 46191df796 ANDROID: vendor_hooks: Export direct reclaim trace points
* 2125796efb ANDROID: fuse-bpf: Move FUSE_RELEASE to correct place
* b5f3851362 ANDROID: fuse-bpf: Ensure bpf field can never be nulled
* d36d65559b ANDROID: GKI: Increase CMA areas to 32
* 0e10eb9bbb ANDROID: HID: Only utilise UHID provided exports if UHID is enabled
* 619b61e8a1 ANDROID: GKI: Update symbol list for xiaomi
* f37fe29d89 ANDROID: build.config.allmodconfig: Build-in HID and UHID
* 6804500c16 ANDROID: HID; Over-ride default maximum buffer size when using UHID
* d384eb48ee UPSTREAM: memstick: r592: Fix UAF bug in r592_remove due to race condition
* c6d386b8a6 BACKPORT: scsi: ufs: core: Fix devfreq deadlocks
* 6bcf6fdd36 UPSTREAM: xfs: verify buffer contents when we skip log replay
* 902976a837 UPSTREAM: bluetooth: Perform careful capability checks in hci_sock_ioctl()
* acdef49259 UPSTREAM: f2fs: skip GC if possible when checkpoint disabling
* 2501e448aa ANDROID: ABI: update gki symbol list
* 0574ecdc5e ANDROID: Add vendor hook for highmem movable page allocations
* 43bb029673 ANDROID: Add vendor hook for precondition check of use_cma_first
* 0a52bf2972 BACKPORT: FROMGIT: mm: skip CMA pages when they are not available
* 087877d515 BACKPORT: FROMGIT: mm: optimization on page allocation when CMA enabled
* 9ab065d281 ANDROID: GKI: Update symbol list for Amlogic

Change-Id: I83e81c652f2eee113a652c667b15740f8573df4a
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Committed by Greg Kroah-Hartman on 2023-07-24 12:14:10 +00:00.
73 files changed, 4074 insertions(+), 1484 deletions(-)


@@ -338,10 +338,6 @@
 			connected to one of 16 gameports
 			Format: <type1>,<type2>,..<type16>
 
-	android_kmalloc_64_create	[MM]
-			Creates all kmalloc variants of the 64-byte slab cache,
-			if the hardware supports it.
-
 	apc=		[HW,SPARC]
 			Power management functions (SPARCstation-4/5 + deriv.)
 			Format: noidle

[File diff suppressed because it is too large]


@@ -2,16 +2,19 @@
add_cpu
add_timer
add_uevent_var
add_wait_queue
adjust_managed_page_count
alloc_anon_inode
alloc_chrdev_region
alloc_etherdev_mqs
alloc_netdev_mqs
__alloc_pages
alloc_pages_exact
__alloc_percpu
__alloc_percpu_gfp
__alloc_skb
alloc_workqueue
android_rvh_probe_register
anon_inode_getfd
anon_inode_getfile
arc4_crypt
@@ -21,6 +24,7 @@
__arch_copy_to_user
arm64_const_caps_ready
arm64_use_ng_mappings
__arm_smccc_hvc
__arm_smccc_smc
atomic_notifier_call_chain
atomic_notifier_chain_register
@@ -40,8 +44,10 @@
__bitmap_and
__bitmap_andnot
__bitmap_clear
bitmap_find_next_zero_area_off
bitmap_free
__bitmap_or
bitmap_parselist
__bitmap_set
__bitmap_shift_left
__bitmap_shift_right
@@ -82,10 +88,14 @@
bpf_trace_run5
bpf_trace_run6
bpf_trace_run7
bpf_trace_run8
bpf_warn_invalid_xdp_action
__bread_gfp
__brelse
bus_for_each_dev
bus_register
bus_register_notifier
bus_unregister
bus_unregister_notifier
cache_line_size
call_rcu
@@ -115,6 +125,10 @@
clear_nlink
clear_page
__ClearPageMovable
clk_bulk_disable
clk_bulk_enable
clk_bulk_prepare
clk_bulk_unprepare
clk_disable
clk_divider_ops
clk_enable
@@ -171,6 +185,7 @@
__const_udelay
consume_skb
contig_page_data
copy_from_kernel_nofault
copy_page_from_iter_atomic
cpu_all_bits
cpu_bit_bitmap
@@ -240,6 +255,7 @@
crypto_unregister_shash
crypto_unregister_skcipher
__crypto_xor
css_next_descendant_pre
csum_ipv6_magic
csum_partial
_ctype
@@ -253,10 +269,12 @@
debugfs_create_file
debugfs_create_u32
debugfs_create_u64
debugfs_lookup
debugfs_remove
debugfs_rename
dec_zone_page_state
default_llseek
default_wake_function
delayed_work_timer_fn
del_gendisk
del_timer
@@ -267,6 +285,7 @@
dev_alloc_name
__dev_change_net_namespace
dev_close
_dev_crit
dev_driver_string
_dev_err
dev_err_probe
@@ -274,6 +293,7 @@
dev_fwnode
__dev_get_by_index
dev_get_by_index
__dev_get_by_name
dev_get_by_name
dev_get_regmap
device_add
@@ -292,6 +312,7 @@
device_link_add
device_link_del
device_property_present
device_property_read_string
device_property_read_u32_array
device_register
device_remove_file
@@ -304,6 +325,7 @@
__dev_kfree_skb_any
devm_add_action
devm_alloc_etherdev_mqs
devm_clk_bulk_get_all
devm_clk_get
devm_clk_get_optional
devm_clk_hw_register
@@ -329,11 +351,15 @@
devm_kasprintf
devm_kfree
devm_kmalloc
devm_kmemdup
devm_kstrdup
devm_kvasprintf
devm_mbox_controller_register
devm_nvmem_cell_get
devm_of_clk_add_hw_provider
devm_of_phy_get
devm_of_pwm_get
devm_pci_alloc_host_bridge
devm_pci_remap_cfg_resource
devm_phy_get
devm_pinctrl_get
@@ -345,6 +371,7 @@
devm_regmap_field_alloc
__devm_regmap_init_i2c
__devm_regmap_init_mmio_clk
devm_regulator_bulk_get
devm_regulator_get
devm_regulator_get_optional
devm_regulator_put
@@ -427,6 +454,7 @@
dma_fence_default_wait
dma_fence_free
dma_fence_get_status
dma_fence_get_stub
dma_fence_init
dma_fence_release
dma_fence_remove_callback
@@ -436,6 +464,7 @@
dma_free_attrs
dma_heap_add
dma_heap_buffer_alloc
dma_heap_buffer_free
dma_heap_find
dma_heap_get_dev
dma_heap_get_drvdata
@@ -503,6 +532,7 @@
__drm_atomic_helper_private_obj_duplicate_state
drm_atomic_helper_resume
drm_atomic_helper_set_config
drm_atomic_helper_setup_commit
drm_atomic_helper_suspend
drm_atomic_helper_swap_state
drm_atomic_helper_update_plane
@@ -679,6 +709,7 @@
_find_last_bit
_find_next_bit
__find_vma
find_vm_area
find_vpid
finish_wait
flow_block_cb_setup_simple
@@ -697,6 +728,7 @@
free_netdev
__free_pages
free_pages
free_pages_exact
free_percpu
freezing_slow_path
fs_bio_set
@@ -704,7 +736,20 @@
fs_param_is_u32
__fs_parse
fsync_bdev
fwnode_device_is_available
fwnode_get_name
fwnode_graph_get_next_endpoint
fwnode_graph_get_port_parent
fwnode_graph_get_remote_endpoint
fwnode_graph_get_remote_port_parent
fwnode_graph_parse_endpoint
fwnode_handle_get
fwnode_handle_put
fwnode_property_get_reference_args
fwnode_property_present
fwnode_property_read_string
fwnode_property_read_u32_array
fwnode_property_read_u64_array
gcd
generic_block_bmap
generic_fh_to_dentry
@@ -717,6 +762,7 @@
generic_file_splice_read
__generic_file_write_iter
generic_fillattr
generic_handle_irq
generic_permission
generic_read_dir
generic_write_checks
@@ -738,7 +784,10 @@
gen_pool_best_fit
gen_pool_create
gen_pool_destroy
gen_pool_first_fit_align
gen_pool_first_fit_order_align
gen_pool_free_owner
gen_pool_has_addr
gen_pool_set_algo
gen_pool_size
gen_pool_virt_to_phys
@@ -749,8 +798,10 @@
get_device
get_device_system_crosststamp
__get_free_pages
get_kernel_pages
get_net_ns_by_fd
get_net_ns_by_pid
get_pfnblock_flags_mask
get_random_bytes
get_random_u32
get_random_u64
@@ -765,16 +816,19 @@
gpiochip_generic_free
gpiochip_generic_request
gpiochip_get_data
gpiod_count
gpiod_direction_input
gpiod_direction_output
gpiod_direction_output_raw
gpiod_get
gpiod_get_index
gpiod_get_optional
gpiod_get_raw_value
gpiod_get_raw_value_cansleep
gpiod_get_value
gpiod_get_value_cansleep
gpiod_put
gpiod_set_consumer_name
gpiod_set_raw_value
gpiod_set_raw_value_cansleep
gpiod_set_value
@@ -784,6 +838,8 @@
gpio_request
gpio_to_desc
grab_cache_page_write_begin
handle_fasteoi_irq
handle_simple_irq
handle_sysrq
hdmi_audio_infoframe_init
hdmi_audio_infoframe_pack
@@ -807,6 +863,8 @@
__hw_addr_init
__hw_addr_sync
__hw_addr_unsync
hwspin_lock_register
hwspin_lock_unregister
i2c_adapter_type
i2c_add_adapter
i2c_bus_type
@@ -849,6 +907,7 @@
ilookup
inc_nlink
inc_zone_page_state
in_egroup_p
inet_csk_get_port
init_net
init_pseudo
@@ -865,6 +924,7 @@
inode_init_once
inode_init_owner
inode_needs_sync
inode_newsize_ok
inode_nohighmem
inode_set_bytes
input_allocate_device
@@ -877,9 +937,14 @@
invalidate_bdev
invalidate_inode_buffers
iomem_resource
iommu_device_register
iommu_device_sysfs_add
iommu_device_sysfs_remove
iommu_device_unregister
__ioremap
ioremap_cache
iounmap
iov_iter_init
iov_iter_revert
iov_iter_zero
iput
@@ -891,14 +956,22 @@
irq_chip_set_type_parent
irq_chip_unmask_parent
irq_create_fwspec_mapping
irq_dispose_mapping
__irq_domain_add
irq_domain_alloc_irqs_parent
irq_domain_create_hierarchy
irq_domain_free_irqs_parent
irq_domain_get_irq_data
irq_domain_remove
irq_domain_set_hwirq_and_chip
irq_domain_set_info
irq_find_matching_fwspec
irq_of_parse_and_map
__irq_resolve_mapping
irq_set_affinity_hint
irq_set_chained_handler_and_data
__irq_set_handler
irq_set_handler_data
irq_set_irq_wake
irq_stat
irq_to_desc
@@ -915,10 +988,12 @@
kasan_flag_enabled
kasprintf
kernel_cpustat
kernel_kobj
kernel_neon_begin
kernel_neon_end
kernel_param_lock
kernel_param_unlock
kernel_restart
kernel_sigaction
kern_mount
kern_unmount
@@ -940,6 +1015,7 @@
kfree_sensitive
kfree_skb
kfree_skb_list
kfree_skb_reason
kill_anon_super
kill_block_super
kill_fasync
@@ -954,12 +1030,14 @@
kmem_cache_destroy
kmem_cache_free
kmemdup
kobject_create_and_add
kobject_init_and_add
kobject_put
kobject_uevent
kobject_uevent_env
krealloc
kstrdup
kstrdup_const
kstrtobool
kstrtobool_from_user
kstrtoint
@@ -971,6 +1049,7 @@
kstrtouint
kstrtouint_from_user
kstrtoull
kthread_bind
kthread_create_on_node
__kthread_init_worker
kthread_queue_work
@@ -993,7 +1072,10 @@
led_classdev_register_ext
led_classdev_unregister
led_trigger_blink_oneshot
led_trigger_event
led_trigger_register
led_trigger_register_simple
led_trigger_unregister
led_trigger_unregister_simple
__list_add_valid
__list_del_entry_valid
@@ -1012,10 +1094,12 @@
mark_buffer_dirty
__mark_inode_dirty
mbox_chan_received_data
mbox_chan_txdone
mbox_controller_register
mbox_controller_unregister
mbox_free_channel
mbox_request_channel
mbox_request_channel_byname
mbox_send_message
mdiobus_alloc_size
mdiobus_free
@@ -1025,18 +1109,23 @@
mdiobus_write
mdio_device_create
mdio_device_free
memchr
memcmp
memcpy
__memcpy_fromio
__memcpy_toio
memdup_user
memmove
memory_cgrp_subsys_enabled_key
memparse
memremap
mem_section
memset64
memset
__memset_io
memstart_addr
memunmap
migrate_pages
misc_deregister
misc_register
__mmap_lock_do_trace_acquire_returned
@@ -1046,6 +1135,9 @@
mmc_alloc_host
mmc_card_is_blockaddr
__mmc_claim_host
mmc_cmdq_disable
mmc_cmdq_enable
mmc_cqe_request_done
mmc_detect_change
mmc_free_host
mmc_gpio_get_cd
@@ -1093,6 +1185,7 @@
netdev_err
netdev_info
netdev_pick_tx
netdev_printk
netdev_rss_key_fill
netdev_set_default_ethtool_ops
netdev_update_features
@@ -1110,6 +1203,7 @@
netif_schedule_queue
netif_set_real_num_rx_queues
netif_set_real_num_tx_queues
netif_set_xps_queue
netif_tx_stop_all_queues
netif_tx_wake_queue
netlink_broadcast
@@ -1138,6 +1232,7 @@
noop_backing_dev_info
noop_llseek
nr_cpu_ids
ns_capable
ns_to_timespec64
__num_online_cpus
nvmem_cell_read
@@ -1147,6 +1242,7 @@
of_clk_get
of_clk_get_by_name
of_clk_hw_onecell_get
of_clk_set_defaults
of_clk_src_onecell_get
of_device_get_match_data
of_device_is_available
@@ -1165,6 +1261,7 @@
of_genpd_add_provider_onecell
of_get_child_by_name
of_get_compatible_child
of_get_i2c_adapter_by_node
of_get_mac_address
of_get_named_gpio_flags
of_get_next_available_child
@@ -1194,6 +1291,7 @@
of_property_read_string
of_property_read_string_helper
of_property_read_u32_index
of_property_read_u64
of_property_read_variable_u16_array
of_property_read_variable_u32_array
of_property_read_variable_u8_array
@@ -1202,6 +1300,7 @@
of_reserved_mem_device_init_by_idx
of_reserved_mem_device_release
of_reserved_mem_lookup
of_reset_control_array_get
__of_reset_control_get
of_thermal_get_ntrips
of_thermal_is_trip_valid
@@ -1214,6 +1313,8 @@
page_endio
__page_mapcount
page_mapping
__page_pinner_failure_detect
__page_pinner_put_page
page_pool_alloc_pages
page_pool_create
page_pool_destroy
@@ -1223,6 +1324,8 @@
panic_notifier_list
param_array_ops
param_get_charp
param_get_hexint
param_get_int
param_get_string
param_ops_bool
param_ops_byte
@@ -1237,14 +1340,25 @@
param_ops_ushort
param_set_charp
param_set_copystring
param_set_hexint
param_set_int
pci_disable_device
pci_enable_device
pci_find_next_bus
pci_generic_config_read
pci_generic_config_write
pci_get_device
pci_host_probe
pci_lock_rescan_remove
pci_msi_create_irq_domain
pci_msi_enabled
pci_msi_mask_irq
pci_msi_unmask_irq
pci_read_config_dword
pci_remove_root_bus
pci_rescan_bus
pci_stop_and_remove_bus_device_locked
pci_stop_root_bus
pci_unlock_rescan_remove
pci_write_config_dword
PDE_DATA
@@ -1297,8 +1411,11 @@
pinctrl_select_state
pinctrl_utils_free_map
pin_user_pages
pin_user_pages_fast
pin_user_pages_remote
platform_bus_type
platform_device_add
platform_device_add_data
platform_device_add_resources
platform_device_alloc
platform_device_del
@@ -1316,6 +1433,7 @@
platform_get_resource_byname
pm_genpd_add_subdomain
pm_genpd_init
pm_genpd_remove
pm_power_off
__pm_relax
pm_relax
@@ -1336,6 +1454,7 @@
pm_stay_awake
pm_wakeup_dev_event
pm_wakeup_ws_event
pm_wq
posix_acl_chmod
prandom_bytes
prandom_u32
@@ -1360,6 +1479,7 @@
ptp_clock_index
ptp_clock_register
ptp_clock_unregister
putback_movable_pages
put_device
__put_net
__put_page
@@ -1392,8 +1512,10 @@
_raw_spin_unlock_irq
_raw_spin_unlock_irqrestore
_raw_write_lock
_raw_write_lock_bh
_raw_write_lock_irqsave
_raw_write_unlock
_raw_write_unlock_bh
_raw_write_unlock_irqrestore
rb_erase
rb_first
@@ -1408,8 +1530,11 @@
rdev_get_drvdata
read_cache_page
read_sanitised_ftr_reg
rebuild_sched_domains
refcount_warn_saturate
__refrigerator
regcache_cache_only
regcache_mark_dirty
regcache_sync
__register_blkdev
__register_chrdev
@@ -1419,6 +1544,8 @@
register_filesystem
register_inet6addr_notifier
register_inetaddr_notifier
register_kprobe
register_kretprobe
register_module_notifier
register_netdev
register_netdevice
@@ -1440,6 +1567,8 @@
regmap_read
regmap_update_bits_base
regmap_write
regulator_bulk_disable
regulator_bulk_enable
regulator_count_voltages
regulator_disable
regulator_enable
@@ -1456,6 +1585,7 @@
remap_vmalloc_range
remove_cpu
remove_proc_entry
remove_wait_queue
request_firmware
request_firmware_nowait
__request_module
@@ -1465,6 +1595,7 @@
reset_control_deassert
reset_control_put
reset_control_reset
return_address
rfkill_alloc
rfkill_blocked
rfkill_destroy
@@ -1472,7 +1603,9 @@
rfkill_register
rfkill_resume_polling
rfkill_set_hw_state_reason
rfkill_set_sw_state
rfkill_unregister
rfs_needed
rhashtable_free_and_destroy
rhashtable_insert_slow
rhltable_init
@@ -1482,12 +1615,17 @@
round_jiffies
round_jiffies_relative
round_jiffies_up
rps_cpu_mask
rps_sock_flow_table
rtc_add_group
rtc_time64_to_tm
rtc_tm_to_time64
rtc_update_irq
rtnl_is_locked
rtnl_link_register
rtnl_link_unregister
rtnl_lock
rtnl_trylock
rtnl_unlock
runqueues
sb_set_blocksize
@@ -1502,6 +1640,7 @@
schedule
schedule_timeout
schedule_timeout_interruptible
schedule_timeout_killable
scnprintf
sdio_align_size
sdio_claim_host
@@ -1547,6 +1686,7 @@
set_capacity_and_notify
set_cpus_allowed_ptr
set_freezable
set_next_entity
set_nlink
set_normalized_timespec64
set_page_dirty
@@ -1586,6 +1726,7 @@
skb_complete_wifi_ack
skb_copy
skb_copy_bits
skb_copy_datagram_iter
skb_copy_expand
skb_dequeue
skb_ensure_writable
@@ -1593,15 +1734,18 @@
__skb_gso_segment
__skb_pad
skb_pull
skb_pull_rcsum
skb_push
skb_put
skb_queue_head
skb_queue_purge
skb_queue_tail
skb_realloc_headroom
skb_scrub_packet
skb_trim
skb_tstamp_tx
skip_spaces
smpboot_register_percpu_thread
smp_call_function
smp_call_function_single
snd_ctl_add
@@ -1625,6 +1769,7 @@
_snd_pcm_stream_lock_irqsave
snd_pcm_stream_unlock_irqrestore
snd_soc_add_card_controls
snd_soc_add_component_controls
snd_soc_add_dai_controls
snd_soc_bytes_info_ext
snd_soc_bytes_tlv_callback
@@ -1670,6 +1815,7 @@
spi_busnum_to_master
spi_controller_resume
spi_controller_suspend
spi_finalize_current_message
spi_finalize_current_transfer
spi_mem_adjust_op_size
spi_mem_default_supports_op
@@ -1697,6 +1843,8 @@
strcmp
strcpy
strcspn
stream_open
strim
strlcpy
strlen
strncasecmp
@@ -1735,11 +1883,13 @@
sync_mapping_buffers
syscon_node_to_regmap
syscon_regmap_lookup_by_phandle
sysctl_sched_latency
sysfs_create_file_ns
sysfs_create_group
sysfs_create_link
sysfs_emit
__sysfs_match_string
sysfs_remove_bin_file
sysfs_remove_file_ns
sysfs_remove_group
sysfs_remove_link
@@ -1757,6 +1907,7 @@
__tasklet_schedule
tasklet_setup
tasklet_unlock_wait
task_may_not_preempt
thermal_cooling_device_unregister
thermal_of_cooling_device_register
thermal_zone_device_unregister
@@ -1764,7 +1915,9 @@
thermal_zone_get_zone_by_name
thermal_zone_of_sensor_unregister
time64_to_tm
timespec64_to_jiffies
_totalram_pages
trace_clock_local
trace_event_buffer_commit
trace_event_buffer_reserve
trace_event_ignore_this_pid
@@ -1772,12 +1925,25 @@
trace_event_raw_init
trace_event_reg
trace_handle_return
__traceiter_android_rvh_arm64_serror_panic
__traceiter_android_rvh_check_preempt_tick
__traceiter_android_rvh_check_preempt_wakeup
__traceiter_android_rvh_do_sea
__traceiter_android_rvh_gic_v3_set_affinity
__traceiter_android_rvh_iommu_setup_dma_ops
__traceiter_android_rvh_panic_unhandled
__traceiter_android_rvh_place_entity
__traceiter_android_rvh_replace_next_task_fair
__traceiter_android_rvh_schedule
__traceiter_android_rvh_select_task_rq_rt
__traceiter_android_vh_cma_drain_all_pages_bypass
__traceiter_android_vh_cpu_idle_enter
__traceiter_android_vh_cpu_idle_exit
__traceiter_android_vh_do_traversal_lruvec
__traceiter_android_vh_dump_throttled_rt_tasks
__traceiter_android_vh_ftrace_format_check
__traceiter_android_vh_iommu_iovad_free_iova
__traceiter_android_vh_mem_cgroup_alloc
__traceiter_android_vh_mmc_sd_update_cmdline_timing
__traceiter_android_vh_mmc_sd_update_dataline_timing
__traceiter_android_vh_rmqueue
@@ -1790,12 +1956,25 @@
__traceiter_mmap_lock_start_locking
__traceiter_xdp_exception
trace_output_call
__tracepoint_android_rvh_arm64_serror_panic
__tracepoint_android_rvh_check_preempt_tick
__tracepoint_android_rvh_check_preempt_wakeup
__tracepoint_android_rvh_do_sea
__tracepoint_android_rvh_gic_v3_set_affinity
__tracepoint_android_rvh_iommu_setup_dma_ops
__tracepoint_android_rvh_panic_unhandled
__tracepoint_android_rvh_place_entity
__tracepoint_android_rvh_replace_next_task_fair
__tracepoint_android_rvh_schedule
__tracepoint_android_rvh_select_task_rq_rt
__tracepoint_android_vh_cma_drain_all_pages_bypass
__tracepoint_android_vh_cpu_idle_enter
__tracepoint_android_vh_cpu_idle_exit
__tracepoint_android_vh_do_traversal_lruvec
__tracepoint_android_vh_dump_throttled_rt_tasks
__tracepoint_android_vh_ftrace_format_check
__tracepoint_android_vh_iommu_iovad_free_iova
__tracepoint_android_vh_mem_cgroup_alloc
__tracepoint_android_vh_mmc_sd_update_cmdline_timing
__tracepoint_android_vh_mmc_sd_update_dataline_timing
__tracepoint_android_vh_rmqueue
@@ -1812,6 +1991,7 @@
trace_print_array_seq
trace_print_flags_seq
trace_print_symbols_seq
__trace_puts
trace_raw_output_prep
truncate_inode_pages_final
truncate_pagecache
@@ -1844,12 +2024,15 @@
unlock_page
unmap_mapping_range
unpin_user_page
unpin_user_pages
unregister_blkdev
__unregister_chrdev
unregister_chrdev_region
unregister_filesystem
unregister_inet6addr_notifier
unregister_inetaddr_notifier
unregister_kprobe
unregister_kretprobe
unregister_module_notifier
unregister_netdev
unregister_netdevice_many
@@ -1863,25 +2046,46 @@
unregister_sysctl_table
unregister_wide_hw_breakpoint
up
update_rq_clock
up_read
up_write
usb_add_gadget_udc
usb_add_hcd
usb_add_phy_dev
usb_alloc_urb
usb_anchor_urb
usb_autopm_get_interface
usb_autopm_put_interface
usb_control_msg
usb_create_hcd
usb_del_gadget_udc
usb_deregister
usb_disable_autosuspend
usb_driver_claim_interface
usb_driver_release_interface
usb_ep_set_maxpacket_limit
usb_free_urb
usb_gadget_probe_driver
usb_gadget_unregister_driver
usb_get_from_anchor
usb_hcd_check_unlink_urb
usb_hcd_giveback_urb
usb_hcd_resume_root_hub
usb_ifnum_to_if
usb_interrupt_msg
usb_kill_anchored_urbs
usb_put_hcd
usb_register_driver
usb_remove_hcd
usb_scuttle_anchored_urbs
usb_set_interface
usb_submit_urb
usb_unanchor_urb
__usecs_to_jiffies
usleep_range_state
utf16s_to_utf8s
utf8_to_utf32
uuid_null
v4l2_ctrl_add_handler
v4l2_ctrl_handler_free
v4l2_ctrl_handler_init_class
@@ -1890,7 +2094,9 @@
v4l2_ctrl_new_std
v4l2_ctrl_subscribe_event
v4l2_device_register
v4l2_device_register_subdev
v4l2_device_unregister
v4l2_device_unregister_subdev
v4l2_event_queue_fh
v4l2_event_subscribe
v4l2_event_unsubscribe
@@ -1899,6 +2105,7 @@
v4l2_fh_exit
v4l2_fh_init
v4l2_fh_open
v4l2_i2c_subdev_init
v4l2_m2m_buf_queue
v4l2_m2m_buf_remove
v4l2_m2m_ctx_init
@@ -1945,6 +2152,7 @@
vb2_queue_release
verify_pkcs7_signature
vfree
vfs_fsync
vfs_fsync_range
video_devdata
video_device_alloc
@@ -1957,11 +2165,13 @@
vmalloc_to_pfn
vmalloc_user
vmap
vm_event_states
vmf_insert_pfn_prot
vm_insert_page
vm_unmap_aliases
vm_zone_stat
vprintk
vscnprintf
vsnprintf
vsprintf
vunmap
@@ -1972,6 +2182,7 @@
wait_for_completion_killable
wait_for_completion_timeout
__wait_on_buffer
wait_on_page_bit
__wake_up
wake_up_process
wakeup_source_register


@@ -344,6 +344,21 @@
device_unregister
_dev_info
__dev_kfree_skb_any
devlink_alloc_ns
devlink_flash_update_status_notify
devlink_fmsg_binary_pair_nest_end
devlink_fmsg_binary_pair_nest_start
devlink_fmsg_binary_put
devlink_free
devlink_health_report
devlink_health_reporter_create
devlink_health_reporter_destroy
devlink_health_reporter_priv
devlink_health_reporter_state_update
devlink_region_create
devlink_region_destroy
devlink_register
devlink_unregister
devm_add_action
__devm_alloc_percpu
devm_blk_ksm_init


@@ -44,9 +44,11 @@
blk_status_to_errno
call_rcu
capable
cgroup_add_dfl_cftypes
cgroup_add_legacy_cftypes
__class_register
class_unregister
clear_page
__ClearPageMovable
complete
congestion_wait
@@ -387,6 +389,15 @@
__traceiter_android_vh_check_uninterruptible_tasks_dn
__traceiter_android_vh_cleanup_old_buffers_bypass
__traceiter_android_vh_cma_drain_all_pages_bypass
__traceiter_android_vh_compact_finished
__traceiter_android_vh_alloc_pages_reclaim_bypass
__traceiter_android_vh_free_unref_page_bypass
__traceiter_android_vh_kvmalloc_node_use_vmalloc
__traceiter_android_vh_should_alloc_pages_retry
__traceiter_android_vh_unreserve_highatomic_bypass
__traceiter_android_vh_rmqueue_bulk_bypass
__traceiter_android_vh_tune_mmap_readaround
__traceiter_android_vh_ra_tuning_max_page
__traceiter_android_vh_cpufreq_acct_update_power
__traceiter_android_vh_del_page_from_lrulist
__traceiter_android_vh_do_futex
@@ -408,6 +419,7 @@
__traceiter_android_vh_irqtime_account_process_tick
__traceiter_android_vh_killed_process
__traceiter_android_vh_kmalloc_slab
__traceiter_android_vh_madvise_cold_or_pageout_abort
__traceiter_android_vh_mark_page_accessed
__traceiter_android_vh_mem_cgroup_alloc
__traceiter_android_vh_mem_cgroup_css_offline
@@ -452,6 +464,7 @@
__traceiter_block_rq_merge
__traceiter_block_rq_requeue
__traceiter_block_split
__traceiter_mm_vmscan_direct_reclaim_begin
__traceiter_net_dev_queue
__traceiter_net_dev_xmit
__traceiter_netif_receive_skb
@@ -492,6 +505,15 @@
__tracepoint_android_vh_check_uninterruptible_tasks_dn
__tracepoint_android_vh_cleanup_old_buffers_bypass
__tracepoint_android_vh_cma_drain_all_pages_bypass
__tracepoint_android_vh_compact_finished
__tracepoint_android_vh_alloc_pages_reclaim_bypass
__tracepoint_android_vh_free_unref_page_bypass
__tracepoint_android_vh_kvmalloc_node_use_vmalloc
__tracepoint_android_vh_should_alloc_pages_retry
__tracepoint_android_vh_unreserve_highatomic_bypass
__tracepoint_android_vh_rmqueue_bulk_bypass
__tracepoint_android_vh_tune_mmap_readaround
__tracepoint_android_vh_ra_tuning_max_page
__tracepoint_android_vh_cpufreq_acct_update_power
__tracepoint_android_vh_del_page_from_lrulist
__tracepoint_android_vh_dm_bufio_shrink_scan_bypass
@@ -513,6 +535,7 @@
__tracepoint_android_vh_irqtime_account_process_tick
__tracepoint_android_vh_killed_process
__tracepoint_android_vh_kmalloc_slab
__tracepoint_android_vh_madvise_cold_or_pageout_abort
__tracepoint_android_vh_mark_page_accessed
__tracepoint_android_vh_mem_cgroup_alloc
__tracepoint_android_vh_mem_cgroup_css_offline
@@ -557,6 +580,7 @@
__tracepoint_block_rq_merge
__tracepoint_block_rq_requeue
__tracepoint_block_split
__tracepoint_mm_vmscan_direct_reclaim_begin
__tracepoint_net_dev_queue
__tracepoint_net_dev_xmit
__tracepoint_netif_receive_skb


@@ -914,10 +914,12 @@
int_sqrt
iomem_resource
iommu_alloc_resv_region
iommu_attach_device
iommu_attach_group
iommu_aux_attach_device
iommu_aux_detach_device
iommu_aux_get_pasid
iommu_detach_device
iommu_dev_enable_feature
iommu_dev_feature_enabled
iommu_device_register


@@ -1042,6 +1042,7 @@
iommu_group_get_iommudata
iommu_group_put
iommu_group_ref_get
iommu_group_remove_device
iommu_group_set_iommudata
iommu_iova_to_phys
iommu_map


@@ -1144,6 +1144,7 @@
reset_controller_register
reset_control_put
reset_control_reset
reset_control_status
rfkill_alloc
rfkill_blocked
rfkill_destroy
@@ -1441,6 +1442,7 @@
tracepoint_probe_unregister
trace_print_array_seq
trace_print_flags_seq
trace_print_hex_dump_seq
trace_print_symbols_seq
trace_raw_output_prep
trace_seq_printf


@@ -3143,9 +3143,23 @@
__traceiter_android_vh_tune_swappiness
__tracepoint_android_vh_tune_swappiness
# required by unisoc_mm_cma.ko
__traceiter_android_vh_use_cma_first_check
__traceiter_android_vh_alloc_highpage_movable_gfp_adjust
__traceiter_android_vh_anon_gfp_adjust
__traceiter_android_vh_alloc_flags_cma_adjust
__traceiter_android_vh_rmqueue_cma_fallback
__tracepoint_android_vh_use_cma_first_check
__tracepoint_android_vh_alloc_highpage_movable_gfp_adjust
__tracepoint_android_vh_anon_gfp_adjust
__tracepoint_android_vh_alloc_flags_cma_adjust
__tracepoint_android_vh_rmqueue_cma_fallback
# required by unisoc_mm_emem.ko
__traceiter_android_vh_oom_check_panic
__traceiter_android_vh_show_mem
__tracepoint_android_vh_oom_check_panic
__tracepoint_android_vh_show_mem
# required by unisoc_mm_reclaim.ko
__traceiter_android_vh_do_page_trylock
@@ -3161,6 +3175,12 @@
__tracepoint_android_vh_page_trylock_set
__tracepoint_android_vh_shrink_slab_bypass
# required by unisoc_mm_slab.ko
__traceiter_android_vh_kmalloc_order_alloced
__traceiter_android_vh_slab_page_alloced
__tracepoint_android_vh_kmalloc_order_alloced
__tracepoint_android_vh_slab_page_alloced
# required by unisoc_multi_control.ko
cpufreq_table_index_unsorted


@@ -545,3 +545,9 @@
#required by io module
__traceiter_android_vh_psi_group
__tracepoint_android_vh_psi_group
# required by mi_mem_center.ko
__traceiter_android_vh_rmqueue_smallest_bypass
__tracepoint_android_vh_rmqueue_smallest_bypass
__traceiter_android_vh_free_one_page_bypass
__tracepoint_android_vh_free_one_page_bypass


@@ -32,6 +32,7 @@ CONFIG_CGROUP_BPF=y
 CONFIG_NAMESPACES=y
 # CONFIG_PID_NS is not set
 CONFIG_RT_SOFTINT_OPTIMIZATION=y
+CONFIG_RELAY=y
 # CONFIG_RD_BZIP2 is not set
 # CONFIG_RD_LZMA is not set
 # CONFIG_RD_XZ is not set
@@ -119,7 +120,7 @@ CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
 CONFIG_CLEANCACHE=y
 CONFIG_CMA=y
 CONFIG_CMA_DEBUGFS=y
-CONFIG_CMA_AREAS=16
+CONFIG_CMA_AREAS=32
 CONFIG_ZSMALLOC=m
 # CONFIG_ZONE_DMA is not set
 CONFIG_ANON_VMA_NAME=y


@@ -275,17 +275,21 @@ static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
 	return false;
 }
 
-static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
 	if (!__populate_fault_info(vcpu))
 		return true;
 
 	return false;
 }
 
+static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+	__alias(kvm_hyp_handle_memory_fault);
+
+static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+	__alias(kvm_hyp_handle_memory_fault);
+
 static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
-	if (!__populate_fault_info(vcpu))
+	if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
 		return true;
 
 	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
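
The __alias() annotation above is the kernel's wrapper for the compiler's
alias attribute (__attribute__((alias("name")))): one function body, several
symbol names. A minimal standalone C sketch of the same mechanism, using
made-up handler names rather than the kernel's:

#include <stdio.h>

/* One implementation of the common fault path. */
static int handle_memory_fault(int code)
{
	return code != 0;
}

/* Two extra entry points that resolve to the same machine code. */
static int handle_iabt(int code)
	__attribute__((alias("handle_memory_fault")));
static int handle_watchpt(int code)
	__attribute__((alias("handle_memory_fault")));

int main(void)
{
	printf("%d %d\n", handle_iabt(1), handle_watchpt(0)); /* prints: 1 0 */
	return 0;
}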


@@ -200,6 +200,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
 	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
 	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
 	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
+	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
 	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
 };
 
@@ -211,6 +212,7 @@ static const exit_handler_fn pvm_exit_handlers[] = {
 	[ESR_ELx_EC_FP_ASIMD]		= kvm_handle_pvm_fpsimd,
 	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
 	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
+	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
 	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
 };
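
The handler tables above rely on C designated array initializers keyed by
exception class, so adding watchpoint support is a one-line table entry. A
toy, compilable version of the same dispatch pattern (the EC_* values mirror
the arm64 encodings, but everything else here is invented):

#include <stdio.h>

enum { EC_IABT = 0x20, EC_DABT = 0x24, EC_WATCHPT = 0x34, EC_MAX = 0x40 };

typedef int (*exit_handler_fn)(int ec);

static int handle_fault(int ec)
{
	return printf("fault, class %#x\n", ec);
}

static const exit_handler_fn handlers[EC_MAX] = {
	[EC_IABT]    = handle_fault,
	[EC_DABT]    = handle_fault,
	[EC_WATCHPT] = handle_fault,	/* the slot this merge adds */
};

static void dispatch(int ec)
{
	if (ec >= 0 && ec < EC_MAX && handlers[ec])
		handlers[ec](ec);
}

int main(void)
{
	dispatch(EC_WATCHPT);
	return 0;
}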


@@ -32,13 +32,19 @@ static void enter_vmid_context(struct kvm_s2_mmu *mmu,
* to do.
*/
if (vcpu) {
/* We're in guest context */
if (mmu == vcpu->arch.hw_mmu || WARN_ON(mmu != host_mmu))
return;
} else if (mmu == host_mmu) {
return;
cxt->mmu = vcpu->arch.hw_mmu;
} else {
/* We're in host context */
if (mmu == host_mmu)
return;
cxt->mmu = host_mmu;
}
cxt->mmu = mmu;
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
u64 val;


@@ -109,6 +109,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
 	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
 	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
 	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
+	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
 	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
 };


@@ -45,6 +45,7 @@
 #include <asm/virt.h>
 
 #include <trace/hooks/fault.h>
+#include <trace/hooks/mm.h>
 
 struct fault_info {
 	int (*fn)(unsigned long far, unsigned int esr,
@@ -1010,6 +1011,8 @@ struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
 {
 	gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO | __GFP_CMA;
 
+	trace_android_vh_alloc_highpage_movable_gfp_adjust(&flags);
+	trace_android_vh_anon_gfp_adjust(&flags);
 	/*
 	 * If the page is mapped with PROT_MTE, initialise the tags at the
 	 * point of allocation and page zeroing as this is usually faster than


@@ -34,6 +34,7 @@ CONFIG_CGROUP_BPF=y
 CONFIG_NAMESPACES=y
 # CONFIG_TIME_NS is not set
 # CONFIG_PID_NS is not set
+CONFIG_RELAY=y
 # CONFIG_RD_BZIP2 is not set
 # CONFIG_RD_LZMA is not set
 # CONFIG_RD_XZ is not set


@@ -8,6 +8,8 @@ function update_config() {
 	-d INFINIBAND_QIB \
 	-d SAMPLES \
 	-d BPFILTER \
+	-e HID \
+	-e UHID \
 	(cd ${OUT_DIR} && \
 	make O=${OUT_DIR} $archsubarch CROSS_COMPILE=${CROSS_COMPILE} ${TOOL_ARGS} ${MAKE_ARGS} olddefconfig)


@@ -256,6 +256,13 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_referenced_check_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_drain_all_pages_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cma_drain_all_pages_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_pcplist_add_cma_pages_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_unref_page_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_kvmalloc_node_use_vmalloc);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_should_alloc_pages_retry);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_unreserve_highatomic_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rmqueue_bulk_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ra_tuning_max_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_mmap_readaround);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shrink_slab_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_selinux_avc_insert);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_selinux_avc_node_delete);
@@ -415,8 +422,13 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_inactive_ratio);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_check_hibernation_swap);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_save_cpu_resume);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_save_hib_resume_bdev);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_highpage_movable_gfp_adjust);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_anon_gfp_adjust);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_flags_cma_adjust);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rmqueue_cma_fallback);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_dma_buf_stats_teardown);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_madvise_cold_or_pageout);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_madvise_cold_or_pageout_abort);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cma_alloc_retry);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_encrypt_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_init_aes_encrypt);
@@ -430,6 +442,10 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_psi_event);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_psi_group);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rmqueue_smallest_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_one_page_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_use_cma_first_check);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_slab_page_alloced);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_kmalloc_order_alloced);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_compact_finished);
/*
* For type visibility
*/
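
Each EXPORT_TRACEPOINT_SYMBOL_GPL line above makes an ANDROID vendor hook
reachable from vendor modules; the hooks themselves are tracepoints
repurposed as callback slots that may mutate arguments passed by reference.
A rough userspace analogue of the register-then-invoke flow (all names
invented for illustration):

#include <stdio.h>

typedef void (*hook_fn)(unsigned int *gfp_flags);

static hook_fn anon_gfp_adjust_hook;	/* one probe per hook slot */

static int register_hook(hook_fn fn)
{
	if (anon_gfp_adjust_hook)
		return -1;		/* slot already claimed */
	anon_gfp_adjust_hook = fn;
	return 0;
}

/* The call site in core code: a no-op until a module registers. */
static void trace_anon_gfp_adjust(unsigned int *flags)
{
	if (anon_gfp_adjust_hook)
		anon_gfp_adjust_hook(flags);
}

static void vendor_probe(unsigned int *flags)
{
	*flags |= 0x1;			/* vendor policy tweaks the flags */
}

int main(void)
{
	unsigned int flags = 0;

	register_hook(vendor_probe);
	trace_anon_gfp_adjust(&flags);
	printf("flags=%u\n", flags);	/* prints: flags=1 */
	return 0;
}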


@@ -32,6 +32,7 @@
 #include <linux/hiddev.h>
 #include <linux/hid-debug.h>
 #include <linux/hidraw.h>
+#include <linux/uhid.h>
 
 #include "hid-ids.h"
@@ -258,6 +259,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
 {
 	struct hid_report *report;
 	struct hid_field *field;
+	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
 	unsigned int usages;
 	unsigned int offset;
 	unsigned int i;
@@ -288,8 +290,11 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
 	offset = report->size;
 	report->size += parser->global.report_size * parser->global.report_count;
 
+	if (IS_ENABLED(CONFIG_UHID) && parser->device->ll_driver == &uhid_hid_driver)
+		max_buffer_size = UHID_DATA_MAX;
+
 	/* Total size check: Allow for possible report index byte */
-	if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) {
+	if (report->size > (max_buffer_size - 1) << 3) {
 		hid_err(parser->device, "report is too long\n");
 		return -1;
 	}
@@ -1752,6 +1757,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
 	struct hid_report_enum *report_enum = hid->report_enum + type;
 	struct hid_report *report;
 	struct hid_driver *hdrv;
+	int max_buffer_size = HID_MAX_BUFFER_SIZE;
 	unsigned int a;
 	u32 rsize, csize = size;
 	u8 *cdata = data;
@@ -1768,10 +1774,13 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
 
 	rsize = hid_compute_report_size(report);
 
-	if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
-		rsize = HID_MAX_BUFFER_SIZE - 1;
-	else if (rsize > HID_MAX_BUFFER_SIZE)
-		rsize = HID_MAX_BUFFER_SIZE;
+	if (IS_ENABLED(CONFIG_UHID) && hid->ll_driver == &uhid_hid_driver)
+		max_buffer_size = UHID_DATA_MAX;
+
+	if (report_enum->numbered && rsize >= max_buffer_size)
+		rsize = max_buffer_size - 1;
+	else if (rsize > max_buffer_size)
+		rsize = max_buffer_size;
 
 	if (csize < rsize) {
 		dbg_hid("report %d is too short, (%d < %d)\n", report->id,
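
The two hunks above replace the compile-time HID_MAX_BUFFER_SIZE ceiling
with one chosen at runtime from the transport. A compact sketch of the same
clamping logic (the numeric limits are stand-ins, not necessarily the
kernel's values):

#include <stdint.h>

#define GENERIC_MAX	16384u
#define UHID_MAX	4096u

static uint32_t clamp_report_size(uint32_t rsize, int numbered, int is_uhid)
{
	uint32_t max = is_uhid ? UHID_MAX : GENERIC_MAX;

	if (numbered && rsize >= max)	/* leave room for the report-ID byte */
		rsize = max - 1;
	else if (rsize > max)
		rsize = max;
	return rsize;
}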


@@ -879,6 +879,14 @@ static int mtk_iommu_probe(struct platform_device *pdev)
 		return PTR_ERR(data->bclk);
 	}
 
+	if (MTK_IOMMU_HAS_FLAG(data->plat_data, PGTABLE_PA_35_EN)) {
+		ret = dma_set_mask(dev, DMA_BIT_MASK(35));
+		if (ret) {
+			dev_err(dev, "Failed to set dma_mask 35.\n");
+			return ret;
+		}
+	}
+
 	larb_nr = of_count_phandle_with_args(dev->of_node,
 					     "mediatek,larbs", NULL);
 	if (larb_nr < 0)
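
DMA_BIT_MASK(35) above produces a 35-bit address mask; the kernel macro is
(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1)). A one-liner to see the value it
hands to dma_set_mask():

#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	/* 2^35 - 1: the highest address a 35-bit-capable device can reach */
	printf("DMA_BIT_MASK(35) = %#llx\n", DMA_BIT_MASK(35));
	return 0;
}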


@@ -37,6 +37,7 @@ config VIDEO_PVRUSB2_DVB
 	bool "pvrusb2 ATSC/DVB support"
 	default y
 	depends on VIDEO_PVRUSB2 && DVB_CORE
+	depends on VIDEO_PVRUSB2=m || DVB_CORE=y
 	select DVB_LGDT330X if MEDIA_SUBDRV_AUTOSELECT
 	select DVB_S5H1409 if MEDIA_SUBDRV_AUTOSELECT
 	select DVB_S5H1411 if MEDIA_SUBDRV_AUTOSELECT


@@ -102,6 +102,15 @@
16, 4, buf, __len, false); \
} while (0)
/*
* ANDROID: this mutex is used to serialize devfreq and sysfs write booster
* toggling, it was taken out of struct ufs_hba from commit b03f7ed9af6e ("scsi:
* ufs: core: Fix devfreq deadlocks") and made static here in order to preserve
* the ABI.
* Bug: 286803489
*/
static DEFINE_MUTEX(wb_mutex);
int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
const char *prefix)
{
@@ -1207,12 +1216,14 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
* clock scaling is in progress
*/
ufshcd_scsi_block_requests(hba);
mutex_lock(&wb_mutex);
down_write(&hba->clk_scaling_lock);
if (!hba->clk_scaling.is_allowed ||
ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
ret = -EBUSY;
up_write(&hba->clk_scaling_lock);
mutex_unlock(&wb_mutex);
ufshcd_scsi_unblock_requests(hba);
goto out;
}
@@ -1224,17 +1235,15 @@ out:
return ret;
}
static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
{
/* Enable Write Booster if we have scaled up else disable it */
if (ufshcd_enable_wb_if_scaling_up(hba)) {
if (writelock)
up_write(&hba->clk_scaling_lock);
else
up_read(&hba->clk_scaling_lock);
up_write(&hba->clk_scaling_lock);
ufshcd_wb_toggle(hba, writelock);
}
/* Enable Write Booster if we have scaled up else disable it */
if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
ufshcd_wb_toggle(hba, scale_up);
mutex_unlock(&wb_mutex);
ufshcd_scsi_unblock_requests(hba);
ufshcd_release(hba);
@@ -1252,7 +1261,6 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
int ret = 0;
bool is_writelock = true;
ret = ufshcd_clock_scaling_prepare(hba);
if (ret)
@@ -1281,13 +1289,8 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
}
}
/* Enable Write Booster if we have scaled up else disable it */
downgrade_write(&hba->clk_scaling_lock);
is_writelock = false;
ufshcd_wb_toggle(hba, scale_up);
out_unprepare:
ufshcd_clock_scaling_unprepare(hba, is_writelock);
ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
return ret;
}
@@ -6030,9 +6033,11 @@ static void ufshcd_force_error_recovery(struct ufs_hba *hba)
static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
{
mutex_lock(&wb_mutex);
down_write(&hba->clk_scaling_lock);
hba->clk_scaling.is_allowed = allow;
up_write(&hba->clk_scaling_lock);
mutex_unlock(&wb_mutex);
}
static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
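
The ANDROID comment above documents a recurring GKI backport trick: upstream
added a mutex inside struct ufs_hba, but enlarging a struct that GKI modules
already embed or dereference would break the frozen ABI, so the backport
hoists the lock into a file-scope static. A toy userspace analogue of that
trade-off (types and names invented):

#include <pthread.h>

struct device_state {		/* frozen ABI: layout must not change */
	int scaling_allowed;
};

/* Lives outside the struct, so the layout stays ABI-identical. */
static pthread_mutex_t wb_lock = PTHREAD_MUTEX_INITIALIZER;

static void set_scaling_allowed(struct device_state *st, int allow)
{
	pthread_mutex_lock(&wb_lock);	/* global, not per-device */
	st->scaling_allowed = allow;
	pthread_mutex_unlock(&wb_lock);
}

The cost is that one lock now serializes every instance instead of one;
with a single UFS host per system, as is typical on these devices, that is
presumably an acceptable price for ABI stability.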


@@ -1043,7 +1043,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
 	reg &= ~DWC3_DALEPENA_EP(dep->number);
 	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
 
-	dwc3_remove_requests(dwc, dep, -ECONNRESET);
+	dwc3_remove_requests(dwc, dep, -ESHUTDOWN);
 
 	dep->stream_capable = false;
 	dep->type = 0;


@@ -2109,7 +2109,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
 {
 	unsigned int s_flags = sbi->sb->s_flags;
 	struct cp_control cpc;
-	unsigned int gc_mode;
+	unsigned int gc_mode = sbi->gc_mode;
 	int err = 0;
 	int ret;
 	block_t unusable;
@@ -2120,9 +2120,13 @@
 	}
 	sbi->sb->s_flags |= SB_ACTIVE;
 
+	/* check if we need more GC first */
+	unusable = f2fs_get_unusable_blocks(sbi);
+	if (!f2fs_disable_cp_again(sbi, unusable))
+		goto skip_gc;
+
 	f2fs_update_time(sbi, DISABLE_TIME);
-	gc_mode = sbi->gc_mode;
 	sbi->gc_mode = GC_URGENT_HIGH;
 
 	while (!f2fs_time_over(sbi, DISABLE_TIME)) {
@@ -2148,6 +2152,7 @@
 		goto restore_flag;
 	}
 
+skip_gc:
 	f2fs_down_write(&sbi->gc_lock);
 	cpc.reason = CP_PAUSE;
 	set_sbi_flag(sbi, SBI_CP_DISABLED);
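
The reshuffled f2fs hunk follows a common shape: snapshot a tunable at
entry, check cheaply whether the expensive phase is needed at all, and
restore the snapshot afterwards. Schematically (types and helpers are
invented for the sketch):

#include <stdbool.h>

enum { MODE_NORMAL, MODE_URGENT };

struct ctx {
	int mode;
	int pending;
};

static bool work_needed(const struct ctx *c)
{
	return c->pending > 0;
}

static void do_expensive_work(struct ctx *c)
{
	c->pending = 0;
}

static void with_urgent_mode(struct ctx *c)
{
	int saved = c->mode;	/* snapshot at entry */

	if (!work_needed(c))
		goto skip;	/* the fix: bail before forcing urgent mode */

	c->mode = MODE_URGENT;
	do_expensive_work(c);
skip:
	c->mode = saved;	/* always restore */
}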


@@ -295,44 +295,19 @@ void *fuse_create_open_finalize(
}
int fuse_release_initialize(struct fuse_bpf_args *fa, struct fuse_release_in *fri,
struct inode *inode, struct file *file)
struct inode *inode, struct fuse_file *ff)
{
struct fuse_file *fuse_file = file->private_data;
/* Always put backing file whatever bpf/userspace says */
fput(fuse_file->backing_file);
fput(ff->backing_file);
*fri = (struct fuse_release_in) {
.fh = ((struct fuse_file *)(file->private_data))->fh,
.fh = ff->fh,
};
*fa = (struct fuse_bpf_args) {
.nodeid = get_fuse_inode(inode)->nodeid,
.opcode = FUSE_RELEASE,
.in_numargs = 1,
.in_args[0].size = sizeof(*fri),
.in_args[0].value = fri,
};
return 0;
}
int fuse_releasedir_initialize(struct fuse_bpf_args *fa,
struct fuse_release_in *fri,
struct inode *inode, struct file *file)
{
struct fuse_file *fuse_file = file->private_data;
/* Always put backing file whatever bpf/userspace says */
fput(fuse_file->backing_file);
*fri = (struct fuse_release_in) {
.fh = ((struct fuse_file *)(file->private_data))->fh,
};
*fa = (struct fuse_bpf_args) {
.nodeid = get_fuse_inode(inode)->nodeid,
.opcode = FUSE_RELEASEDIR,
.opcode = S_ISDIR(inode->i_mode) ? FUSE_RELEASEDIR
: FUSE_RELEASE,
.in_numargs = 1,
.in_args[0].size = sizeof(*fri),
.in_args[0].value = fri,
@@ -342,15 +317,14 @@ int fuse_releasedir_initialize(struct fuse_bpf_args *fa,
}
int fuse_release_backing(struct fuse_bpf_args *fa,
struct inode *inode, struct file *file)
struct inode *inode, struct fuse_file *ff)
{
return 0;
}
void *fuse_release_finalize(struct fuse_bpf_args *fa,
struct inode *inode, struct file *file)
struct inode *inode, struct fuse_file *ff)
{
fuse_file_free(file->private_data);
return NULL;
}
@@ -1223,14 +1197,12 @@ int fuse_handle_bpf_prog(struct fuse_entry_bpf *feb, struct inode *parent,
 	}
 
 	/* Cannot change existing program */
-	if (*bpf && new_bpf) {
-		bpf_prog_put(new_bpf);
+	if (*bpf) {
+		if (new_bpf)
+			bpf_prog_put(new_bpf);
 		return new_bpf == *bpf ? 0 : -EINVAL;
 	}
 
-	if (*bpf)
-		bpf_prog_put(*bpf);
-
 	*bpf = new_bpf;
 	return 0;
 }
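
The hunk above enforces a set-once discipline on the bpf pointer: once set
it can never be cleared or replaced, and a caller presenting the same
program succeeds while its extra reference is dropped. A small compilable
sketch of that reference-handling pattern (plain C stand-ins for bpf_prog
refcounting):

#include <stdlib.h>
#include <errno.h>

struct prog {
	int refs;
};

static void prog_put(struct prog *p)
{
	if (p && --p->refs == 0)
		free(p);
}

static int set_once(struct prog **slot, struct prog *new_prog)
{
	if (*slot) {			/* cannot change an existing program */
		if (new_prog)
			prog_put(new_prog);	/* drop the caller's reference */
		return new_prog == *slot ? 0 : -EINVAL;
	}
	*slot = new_prog;		/* transfer the reference */
	return 0;
}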


@@ -315,7 +315,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
 			spin_unlock(&fi->lock);
 		}
 		kfree(forget);
-		if (ret == -ENOMEM)
+		if (ret == -ENOMEM || ret == -EINTR)
 			goto out;
 		if (ret || fuse_invalid_attr(&outarg.attr) ||
 		    fuse_stale_inode(inode, outarg.generation, &outarg.attr))
@@ -1786,17 +1786,6 @@ static int fuse_dir_open(struct inode *inode, struct file *file)
 
 static int fuse_dir_release(struct inode *inode, struct file *file)
 {
-#ifdef CONFIG_FUSE_BPF
-	struct fuse_err_ret fer;
-
-	fer = fuse_bpf_backing(inode, struct fuse_release_in,
-			fuse_releasedir_initialize, fuse_release_backing,
-			fuse_release_finalize,
-			inode, file);
-	if (fer.ret)
-		return PTR_ERR(fer.result);
-#endif
-
 	fuse_release_common(file, true);
 
 	return 0;
 }


@@ -104,25 +104,39 @@ static void fuse_release_end(struct fuse_mount *fm, struct fuse_args *args,
kfree(ra);
}
static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir)
static void fuse_file_put(struct inode *inode, struct fuse_file *ff,
bool sync, bool isdir)
{
if (refcount_dec_and_test(&ff->count)) {
struct fuse_args *args = &ff->release_args->args;
struct fuse_args *args = &ff->release_args->args;
#ifdef CONFIG_FUSE_BPF
struct fuse_err_ret fer;
#endif
if (isdir ? ff->fm->fc->no_opendir : ff->fm->fc->no_open) {
/* Do nothing when client does not implement 'open' */
fuse_release_end(ff->fm, args, 0);
} else if (sync) {
fuse_simple_request(ff->fm, args);
fuse_release_end(ff->fm, args, 0);
} else {
args->end = fuse_release_end;
if (fuse_simple_background(ff->fm, args,
GFP_KERNEL | __GFP_NOFAIL))
fuse_release_end(ff->fm, args, -ENOTCONN);
}
kfree(ff);
if (!refcount_dec_and_test(&ff->count))
return;
#ifdef CONFIG_FUSE_BPF
fer = fuse_bpf_backing(inode, struct fuse_release_in,
fuse_release_initialize, fuse_release_backing,
fuse_release_finalize,
inode, ff);
if (fer.ret) {
fuse_release_end(ff->fm, args, 0);
} else
#endif
if (isdir ? ff->fm->fc->no_opendir : ff->fm->fc->no_open) {
/* Do nothing when client does not implement 'open' */
fuse_release_end(ff->fm, args, 0);
} else if (sync) {
fuse_simple_request(ff->fm, args);
fuse_release_end(ff->fm, args, 0);
} else {
args->end = fuse_release_end;
if (fuse_simple_background(ff->fm, args,
GFP_KERNEL | __GFP_NOFAIL))
fuse_release_end(ff->fm, args, -ENOTCONN);
}
kfree(ff);
}
struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid,
@@ -345,7 +359,7 @@ void fuse_file_release(struct inode *inode, struct fuse_file *ff,
* synchronous RELEASE is allowed (and desirable) in this case
* because the server can be trusted not to screw up.
*/
fuse_file_put(ff, ff->fm->fc->destroy, isdir);
fuse_file_put(ra->inode, ff, ff->fm->fc->destroy, isdir);
}
void fuse_release_common(struct file *file, bool isdir)
@@ -363,17 +377,6 @@ static int fuse_release(struct inode *inode, struct file *file)
{
struct fuse_conn *fc = get_fuse_conn(inode);
#ifdef CONFIG_FUSE_BPF
struct fuse_err_ret fer;
fer = fuse_bpf_backing(inode, struct fuse_release_in,
fuse_release_initialize, fuse_release_backing,
fuse_release_finalize,
inode, file);
if (fer.ret)
return PTR_ERR(fer.result);
#endif
/* see fuse_vma_close() for !writeback_cache case */
if (fc->writeback_cache)
write_inode_now(inode, 1);
@@ -393,7 +396,7 @@ void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff,
* iput(NULL) is a no-op and since the refcount is 1 and everything's
* synchronous, we are fine with not doing igrab() here"
*/
fuse_file_put(ff, true, false);
fuse_file_put(&fi->inode, ff, true, false);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);
@@ -967,8 +970,11 @@ static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args,
unlock_page(page);
put_page(page);
}
if (ia->ff)
fuse_file_put(ia->ff, false, false);
if (ia->ff) {
WARN_ON(!mapping);
fuse_file_put(mapping ? mapping->host : NULL, ia->ff,
false, false);
}
fuse_io_free(ia);
}
@@ -1707,7 +1713,7 @@ static void fuse_writepage_free(struct fuse_writepage_args *wpa)
__free_page(ap->pages[i]);
if (wpa->ia.ff)
fuse_file_put(wpa->ia.ff, false, false);
fuse_file_put(wpa->inode, wpa->ia.ff, false, false);
kfree(ap->pages);
kfree(wpa);
@@ -1963,7 +1969,7 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
ff = __fuse_write_file_get(fi);
err = fuse_flush_times(inode, ff);
if (ff)
fuse_file_put(ff, false, false);
fuse_file_put(inode, ff, false, false);
return err;
}
@@ -2352,7 +2358,7 @@ static int fuse_writepages(struct address_space *mapping,
fuse_writepages_send(&data);
}
if (data.ff)
fuse_file_put(data.ff, false, false);
fuse_file_put(inode, data.ff, false, false);
kfree(data.orig_pages);
out:


@@ -1503,14 +1503,11 @@
 			struct inode *dir, struct dentry *newent);
 
 int fuse_release_initialize(struct fuse_bpf_args *fa, struct fuse_release_in *fri,
-			struct inode *inode, struct file *file);
-int fuse_releasedir_initialize(struct fuse_bpf_args *fa,
-			struct fuse_release_in *fri,
-			struct inode *inode, struct file *file);
+			struct inode *inode, struct fuse_file *ff);
 int fuse_release_backing(struct fuse_bpf_args *fa,
-			struct inode *inode, struct file *file);
+			struct inode *inode, struct fuse_file *ff);
 void *fuse_release_finalize(struct fuse_bpf_args *fa,
-			struct inode *inode, struct file *file);
+			struct inode *inode, struct fuse_file *ff);
 
 int fuse_flush_initialize(struct fuse_bpf_args *fa, struct fuse_flush_in *ffi,
 			struct file *file, fl_owner_t id);


@@ -113,6 +113,10 @@ static void fuse_free_inode(struct inode *inode)
 	kfree(fi->forget);
 #ifdef CONFIG_FUSE_DAX
 	kfree(fi->dax);
 #endif
+#ifdef CONFIG_FUSE_BPF
+	if (fi->bpf)
+		bpf_prog_put(fi->bpf);
+#endif
 	kmem_cache_free(fuse_inode_cachep, fi);
 }
@@ -123,13 +127,6 @@ static void fuse_evict_inode(struct inode *inode)
 	/* Will write inode on close/munmap and in all other dirtiers */
 	WARN_ON(inode->i_state & I_DIRTY_INODE);
 
-#ifdef CONFIG_FUSE_BPF
-	iput(fi->backing_inode);
-	if (fi->bpf)
-		bpf_prog_put(fi->bpf);
-	fi->bpf = NULL;
-#endif
-
 	truncate_inode_pages_final(&inode->i_data);
 	clear_inode(inode);
 	if (inode->i_sb->s_flags & SB_ACTIVE) {
@@ -149,6 +146,15 @@ static void fuse_evict_inode(struct inode *inode)
 	}
 }
 
+#ifdef CONFIG_FUSE_BPF
+static void fuse_destroy_inode(struct inode *inode)
+{
+	struct fuse_inode *fi = get_fuse_inode(inode);
+
+	iput(fi->backing_inode);
+}
+#endif
+
 static int fuse_reconfigure(struct fs_context *fsc)
 {
 	struct super_block *sb = fsc->root->d_sb;
@@ -1166,6 +1172,9 @@ static const struct export_operations fuse_export_operations = {
 static const struct super_operations fuse_super_operations = {
 	.alloc_inode	= fuse_alloc_inode,
+#ifdef CONFIG_FUSE_BPF
+	.destroy_inode	= fuse_destroy_inode,
+#endif
 	.free_inode	= fuse_free_inode,
 	.evict_inode	= fuse_evict_inode,
 	.write_inode	= fuse_write_inode,


@@ -918,10 +918,10 @@ static long ioctl_get_read_timeouts(struct mount_info *mi, void __user *arg)
 	if (copy_from_user(&args, args_usr_ptr, sizeof(args)))
 		return -EINVAL;
 
-	if (args.timeouts_array_size_out > INCFS_DATA_FILE_BLOCK_SIZE)
+	if (args.timeouts_array_size > INCFS_DATA_FILE_BLOCK_SIZE)
 		return -EINVAL;
 
-	buffer = kzalloc(args.timeouts_array_size_out, GFP_NOFS);
+	buffer = kzalloc(args.timeouts_array_size, GFP_NOFS);
 	if (!buffer)
 		return -ENOMEM;
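
The incremental-fs fix above is the usual validate-then-allocate rule: size
the buffer from the field the caller filled in, never from a field the
kernel is about to write back. A minimal sketch of the pattern (names
invented):

#include <stdlib.h>
#include <errno.h>

#define MAX_REQ 4096

struct req {
	size_t size_in;		/* supplied by the caller */
	size_t size_out;	/* filled in on the way back out */
};

static void *alloc_reply(const struct req *r, int *err)
{
	if (r->size_in > MAX_REQ) {	/* bound the caller-supplied size */
		*err = -EINVAL;
		return NULL;
	}
	*err = 0;
	return calloc(1, r->size_in);	/* not r->size_out */
}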


@@ -5,9 +5,9 @@
 
 obj-$(CONFIG_SQUASHFS) += squashfs.o
 squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o
-squashfs-y += namei.o super.o symlink.o decompressor.o
+squashfs-y += namei.o super.o symlink.o decompressor.o page_actor.o
 squashfs-$(CONFIG_SQUASHFS_FILE_CACHE) += file_cache.o
-squashfs-$(CONFIG_SQUASHFS_FILE_DIRECT) += file_direct.o page_actor.o
+squashfs-$(CONFIG_SQUASHFS_FILE_DIRECT) += file_direct.o
 squashfs-$(CONFIG_SQUASHFS_DECOMP_SINGLE) += decompressor_single.o
 squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI) += decompressor_multi.o
 squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU) += decompressor_multi_percpu.o


@@ -17,6 +17,7 @@
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/bio.h>
@@ -34,12 +35,15 @@ static int copy_bio_to_actor(struct bio *bio,
struct squashfs_page_actor *actor,
int offset, int req_length)
{
void *actor_addr = squashfs_first_page(actor);
void *actor_addr;
struct bvec_iter_all iter_all = {};
struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
int copied_bytes = 0;
int actor_offset = 0;
squashfs_actor_nobuff(actor);
actor_addr = squashfs_first_page(actor);
if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all)))
return 0;
@@ -49,8 +53,9 @@ static int copy_bio_to_actor(struct bio *bio,
bytes_to_copy = min_t(int, bytes_to_copy,
req_length - copied_bytes);
memcpy(actor_addr + actor_offset, bvec_virt(bvec) + offset,
bytes_to_copy);
if (!IS_ERR(actor_addr))
memcpy(actor_addr + actor_offset, bvec_virt(bvec) +
offset, bytes_to_copy);
actor_offset += bytes_to_copy;
copied_bytes += bytes_to_copy;
@@ -72,10 +77,120 @@ static int copy_bio_to_actor(struct bio *bio,
return copied_bytes;
}
static int squashfs_bio_read_cached(struct bio *fullbio,
struct address_space *cache_mapping, u64 index, int length,
u64 read_start, u64 read_end, int page_count)
{
struct page *head_to_cache = NULL, *tail_to_cache = NULL;
int start_idx = 0, end_idx = 0;
struct bvec_iter_all iter_all;
struct bio *bio = NULL;
struct bio_vec *bv;
int idx = 0;
int err = 0;
bio_for_each_segment_all(bv, fullbio, iter_all) {
struct page *page = bv->bv_page;
if (page->mapping == cache_mapping) {
idx++;
continue;
}
/*
* We only use this when the device block size is the same as
* the page size, so read_start and read_end cover full pages.
*
* Compare these to the original required index and length to
* only cache pages which were requested partially, since these
* are the ones which are likely to be needed when reading
* adjacent blocks.
*/
if (idx == 0 && index != read_start)
head_to_cache = page;
else if (idx == page_count - 1 && index + length != read_end)
tail_to_cache = page;
if (!bio || idx != end_idx) {
struct bio *new = bio_clone_fast(fullbio,
GFP_NOIO, &fs_bio_set);
if (bio) {
bio_trim(bio, start_idx * PAGE_SECTORS,
(end_idx - start_idx) * PAGE_SECTORS);
bio_chain(bio, new);
submit_bio(bio);
}
bio = new;
start_idx = idx;
}
idx++;
end_idx = idx;
}
if (bio) {
bio_trim(bio, start_idx * PAGE_SECTORS,
(end_idx - start_idx) * PAGE_SECTORS);
err = submit_bio_wait(bio);
bio_put(bio);
}
if (err)
return err;
if (head_to_cache) {
int ret = add_to_page_cache_lru(head_to_cache, cache_mapping,
read_start >> PAGE_SHIFT,
GFP_NOIO);
if (!ret) {
SetPageUptodate(head_to_cache);
unlock_page(head_to_cache);
}
}
if (tail_to_cache) {
int ret = add_to_page_cache_lru(tail_to_cache, cache_mapping,
(read_end >> PAGE_SHIFT) - 1,
GFP_NOIO);
if (!ret) {
SetPageUptodate(tail_to_cache);
unlock_page(tail_to_cache);
}
}
return 0;
}
static struct page *squashfs_get_cache_page(struct address_space *mapping,
pgoff_t index)
{
struct page *page;
if (!mapping)
return NULL;
page = find_get_page(mapping, index);
if (!page)
return NULL;
if (!PageUptodate(page)) {
put_page(page);
return NULL;
}
return page;
}
static int squashfs_bio_read(struct super_block *sb, u64 index, int length,
struct bio **biop, int *block_offset)
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
struct address_space *cache_mapping = msblk->cache_mapping;
const u64 read_start = round_down(index, msblk->devblksize);
const sector_t block = read_start >> msblk->devblksize_log2;
const u64 read_end = round_up(index + length, msblk->devblksize);
@@ -101,21 +216,33 @@ static int squashfs_bio_read(struct super_block *sb, u64 index, int length,
for (i = 0; i < page_count; ++i) {
unsigned int len =
min_t(unsigned int, PAGE_SIZE - offset, total_len);
struct page *page = alloc_page(GFP_NOIO);
pgoff_t index = (read_start >> PAGE_SHIFT) + i;
struct page *page;
page = squashfs_get_cache_page(cache_mapping, index);
if (!page)
page = alloc_page(GFP_NOIO);
if (!page) {
error = -ENOMEM;
goto out_free_bio;
}
if (!bio_add_page(bio, page, len, offset)) {
error = -EIO;
goto out_free_bio;
}
/*
* Use the __ version to avoid merging since we need each page
* to be separate when we check for and avoid cached pages.
*/
__bio_add_page(bio, page, len, offset);
offset = 0;
total_len -= len;
}
error = submit_bio_wait(bio);
if (cache_mapping)
error = squashfs_bio_read_cached(bio, cache_mapping, index,
length, read_start, read_end,
page_count);
else
error = submit_bio_wait(bio);
if (error)
goto out_free_bio;
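Taken together, the block.c changes make squashfs_bio_read() reuse uptodate pages from the per-superblock cache mapping, add pages with __bio_add_page() so cached and fresh pages never merge, and hand mixed bios to squashfs_bio_read_cached(), which submits I/O only for the uncached segments and inserts partially-requested head/tail pages back into the cache. A condensed sketch of the lookup-or-allocate step (simplified, not the literal squashfs code):

/* Sketch: prefer an uptodate cached page, else allocate a new one. */
static struct page *cached_or_new_page(struct address_space *mapping,
				       pgoff_t index)
{
	struct page *page = NULL;

	if (mapping)
		page = find_get_page(mapping, index);
	if (page && !PageUptodate(page)) {
		put_page(page);		/* stale copy: drop it and read anew */
		page = NULL;
	}
	if (!page)
		page = alloc_page(GFP_NOIO);
	return page;			/* NULL only on allocation failure */
}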


@@ -20,6 +20,7 @@ struct squashfs_decompressor {
struct bio *, int, int, struct squashfs_page_actor *);
int id;
char *name;
int alloc_buffer;
int supported;
};


@@ -39,6 +39,7 @@
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
#include "page_actor.h"
/*
* Locate cache slot in range [offset, index] for specified inode. If
@@ -494,7 +495,142 @@ out:
return 0;
}
static int squashfs_readahead_fragment(struct page **page,
unsigned int pages, unsigned int expected)
{
struct inode *inode = page[0]->mapping->host;
struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
squashfs_i(inode)->fragment_block,
squashfs_i(inode)->fragment_size);
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
unsigned int n, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
int error = buffer->error;
if (error)
goto out;
expected += squashfs_i(inode)->fragment_offset;
for (n = 0; n < pages; n++) {
unsigned int base = (page[n]->index & mask) << PAGE_SHIFT;
unsigned int offset = base + squashfs_i(inode)->fragment_offset;
if (expected > offset) {
unsigned int avail = min_t(unsigned int, expected -
offset, PAGE_SIZE);
squashfs_fill_page(page[n], buffer, offset, avail);
}
unlock_page(page[n]);
put_page(page[n]);
}
out:
squashfs_cache_put(buffer);
return error;
}
static void squashfs_readahead(struct readahead_control *ractl)
{
struct inode *inode = ractl->mapping->host;
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
size_t mask = (1UL << msblk->block_log) - 1;
unsigned short shift = msblk->block_log - PAGE_SHIFT;
loff_t start = readahead_pos(ractl) & ~mask;
size_t len = readahead_length(ractl) + readahead_pos(ractl) - start;
struct squashfs_page_actor *actor;
unsigned int nr_pages = 0;
struct page **pages;
int i, file_end = i_size_read(inode) >> msblk->block_log;
unsigned int max_pages = 1UL << shift;
readahead_expand(ractl, start, (len | mask) + 1);
pages = kmalloc_array(max_pages, sizeof(void *), GFP_KERNEL);
if (!pages)
return;
for (;;) {
pgoff_t index;
int res, bsize;
u64 block = 0;
unsigned int expected;
struct page *last_page;
expected = start >> msblk->block_log == file_end ?
(i_size_read(inode) & (msblk->block_size - 1)) :
msblk->block_size;
max_pages = (expected + PAGE_SIZE - 1) >> PAGE_SHIFT;
nr_pages = __readahead_batch(ractl, pages, max_pages);
if (!nr_pages)
break;
if (readahead_pos(ractl) >= i_size_read(inode))
goto skip_pages;
index = pages[0]->index >> shift;
if ((pages[nr_pages - 1]->index >> shift) != index)
goto skip_pages;
if (index == file_end && squashfs_i(inode)->fragment_block !=
SQUASHFS_INVALID_BLK) {
res = squashfs_readahead_fragment(pages, nr_pages,
expected);
if (res)
goto skip_pages;
continue;
}
bsize = read_blocklist(inode, index, &block);
if (bsize == 0)
goto skip_pages;
actor = squashfs_page_actor_init_special(msblk, pages, nr_pages,
expected);
if (!actor)
goto skip_pages;
res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
last_page = squashfs_page_actor_free(actor);
if (res == expected) {
int bytes;
/* Last page (if present) may have trailing bytes not filled */
bytes = res % PAGE_SIZE;
if (index == file_end && bytes && last_page)
memzero_page(last_page, bytes,
PAGE_SIZE - bytes);
for (i = 0; i < nr_pages; i++) {
flush_dcache_page(pages[i]);
SetPageUptodate(pages[i]);
}
}
for (i = 0; i < nr_pages; i++) {
unlock_page(pages[i]);
put_page(pages[i]);
}
}
kfree(pages);
return;
skip_pages:
for (i = 0; i < nr_pages; i++) {
unlock_page(pages[i]);
put_page(pages[i]);
}
kfree(pages);
}
const struct address_space_operations squashfs_aops = {
.readpage = squashfs_readpage
.readpage = squashfs_readpage,
.readahead = squashfs_readahead
};
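squashfs_readahead() above rounds the window down to a block boundary, expands it to whole blocks, and then consumes it in per-block batches, bailing out to skip_pages whenever a batch straddles blocks or runs past EOF. The alignment arithmetic, shown standalone with the same identifiers (values here are illustrative):

/* Sketch: block-align a readahead window. block_log = 17 gives
 * 128 KiB blocks, so mask = 0x1ffff. */
size_t mask = (1UL << block_log) - 1;
loff_t start = readahead_pos(ractl) & ~mask;		/* round start down */
size_t len = readahead_length(ractl) + readahead_pos(ractl) - start;

readahead_expand(ractl, start, (len | mask) + 1);	/* round length up */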


@@ -18,9 +18,6 @@
#include "squashfs.h"
#include "page_actor.h"
static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
int pages, struct page **page, int bytes);
/* Read separately compressed datablock directly into page cache */
int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
int expected)
@@ -33,7 +30,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
int start_index = target_page->index & ~mask;
int end_index = start_index | mask;
int i, n, pages, missing_pages, bytes, res = -ENOMEM;
int i, n, pages, bytes, res = -ENOMEM;
struct page **page;
struct squashfs_page_actor *actor;
void *pageaddr;
@@ -47,50 +44,38 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
if (page == NULL)
return res;
/*
* Create a "page actor" which will kmap and kunmap the
* page cache pages appropriately within the decompressor
*/
actor = squashfs_page_actor_init_special(page, pages, 0);
if (actor == NULL)
goto out;
/* Try to grab all the pages covered by the Squashfs block */
for (missing_pages = 0, i = 0, n = start_index; i < pages; i++, n++) {
for (i = 0, n = start_index; n <= end_index; n++) {
page[i] = (n == target_page->index) ? target_page :
grab_cache_page_nowait(target_page->mapping, n);
if (page[i] == NULL) {
missing_pages++;
if (page[i] == NULL)
continue;
}
if (PageUptodate(page[i])) {
unlock_page(page[i]);
put_page(page[i]);
page[i] = NULL;
missing_pages++;
continue;
}
i++;
}
if (missing_pages) {
/*
* Couldn't get one or more pages, this page has either
* been VM reclaimed, but others are still in the page cache
* and uptodate, or we're racing with another thread in
* squashfs_readpage also trying to grab them. Fall back to
* using an intermediate buffer.
*/
res = squashfs_read_cache(target_page, block, bsize, pages,
page, expected);
if (res < 0)
goto mark_errored;
pages = i;
/*
* Create a "page actor" which will kmap and kunmap the
* page cache pages appropriately within the decompressor
*/
actor = squashfs_page_actor_init_special(msblk, page, pages, expected);
if (actor == NULL)
goto out;
}
/* Decompress directly into the page cache buffers */
res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
squashfs_page_actor_free(actor);
if (res < 0)
goto mark_errored;
@@ -99,12 +84,12 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
goto mark_errored;
}
/* Last page may have trailing bytes not filled */
/* Last page (if present) may have trailing bytes not filled */
bytes = res % PAGE_SIZE;
if (bytes) {
pageaddr = kmap_atomic(page[pages - 1]);
if (page[pages - 1]->index == end_index && bytes) {
pageaddr = kmap_local_page(page[pages - 1]);
memset(pageaddr + bytes, 0, PAGE_SIZE - bytes);
kunmap_atomic(pageaddr);
kunmap_local(pageaddr);
}
/* Mark pages as uptodate, unlock and release */
@@ -116,7 +101,6 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
put_page(page[i]);
}
kfree(actor);
kfree(page);
return 0;
@@ -135,40 +119,6 @@ mark_errored:
}
out:
kfree(actor);
kfree(page);
return res;
}
static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
int pages, struct page **page, int bytes)
{
struct inode *i = target_page->mapping->host;
struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
block, bsize);
int res = buffer->error, n, offset = 0;
if (res) {
ERROR("Unable to read page, block %llx, size %x\n", block,
bsize);
goto out;
}
for (n = 0; n < pages && bytes > 0; n++,
bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
int avail = min_t(int, bytes, PAGE_SIZE);
if (page[n] == NULL)
continue;
squashfs_fill_page(page[n], buffer, offset, avail);
unlock_page(page[n]);
if (page[n] != target_page)
put_page(page[n]);
}
out:
squashfs_cache_put(buffer);
return res;
}


@@ -119,10 +119,12 @@ static int lz4_uncompress(struct squashfs_sb_info *msblk, void *strm,
buff = stream->output;
while (data) {
if (bytes <= PAGE_SIZE) {
memcpy(data, buff, bytes);
if (!IS_ERR(data))
memcpy(data, buff, bytes);
break;
}
memcpy(data, buff, PAGE_SIZE);
if (!IS_ERR(data))
memcpy(data, buff, PAGE_SIZE);
buff += PAGE_SIZE;
bytes -= PAGE_SIZE;
data = squashfs_next_page(output);
@@ -139,5 +141,6 @@ const struct squashfs_decompressor squashfs_lz4_comp_ops = {
.decompress = lz4_uncompress,
.id = LZ4_COMPRESSION,
.name = "lz4",
.alloc_buffer = 0,
.supported = 1
};


@@ -93,10 +93,12 @@ static int lzo_uncompress(struct squashfs_sb_info *msblk, void *strm,
buff = stream->output;
while (data) {
if (bytes <= PAGE_SIZE) {
memcpy(data, buff, bytes);
if (!IS_ERR(data))
memcpy(data, buff, bytes);
break;
} else {
memcpy(data, buff, PAGE_SIZE);
if (!IS_ERR(data))
memcpy(data, buff, PAGE_SIZE);
buff += PAGE_SIZE;
bytes -= PAGE_SIZE;
data = squashfs_next_page(output);
@@ -116,5 +118,6 @@ const struct squashfs_decompressor squashfs_lzo_comp_ops = {
.decompress = lzo_uncompress,
.id = LZO_COMPRESSION,
.name = "lzo",
.alloc_buffer = 0,
.supported = 1
};


@@ -7,6 +7,8 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include "squashfs_fs_sb.h"
#include "decompressor.h"
#include "page_actor.h"
/*
@@ -50,6 +52,7 @@ struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
actor->buffer = buffer;
actor->pages = pages;
actor->next_page = 0;
actor->tmp_buffer = NULL;
actor->squashfs_first_page = cache_first_page;
actor->squashfs_next_page = cache_next_page;
actor->squashfs_finish_page = cache_finish_page;
@@ -57,40 +60,75 @@ struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
}
/* Implementation of page_actor for decompressing directly into page cache. */
static void *handle_next_page(struct squashfs_page_actor *actor)
{
int max_pages = (actor->length + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (actor->returned_pages == max_pages)
return NULL;
if ((actor->next_page == actor->pages) ||
(actor->next_index != actor->page[actor->next_page]->index)) {
actor->next_index++;
actor->returned_pages++;
actor->last_page = NULL;
return actor->alloc_buffer ? actor->tmp_buffer : ERR_PTR(-ENOMEM);
}
actor->next_index++;
actor->returned_pages++;
actor->last_page = actor->page[actor->next_page];
return actor->pageaddr = kmap_local_page(actor->page[actor->next_page++]);
}
static void *direct_first_page(struct squashfs_page_actor *actor)
{
actor->next_page = 1;
return actor->pageaddr = kmap_atomic(actor->page[0]);
return handle_next_page(actor);
}
static void *direct_next_page(struct squashfs_page_actor *actor)
{
if (actor->pageaddr)
kunmap_atomic(actor->pageaddr);
if (actor->pageaddr) {
kunmap_local(actor->pageaddr);
actor->pageaddr = NULL;
}
return actor->pageaddr = actor->next_page == actor->pages ? NULL :
kmap_atomic(actor->page[actor->next_page++]);
return handle_next_page(actor);
}
static void direct_finish_page(struct squashfs_page_actor *actor)
{
if (actor->pageaddr)
kunmap_atomic(actor->pageaddr);
kunmap_local(actor->pageaddr);
}
struct squashfs_page_actor *squashfs_page_actor_init_special(struct page **page,
int pages, int length)
struct squashfs_page_actor *squashfs_page_actor_init_special(struct squashfs_sb_info *msblk,
struct page **page, int pages, int length)
{
struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
if (actor == NULL)
return NULL;
if (msblk->decompressor->alloc_buffer) {
actor->tmp_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (actor->tmp_buffer == NULL) {
kfree(actor);
return NULL;
}
} else
actor->tmp_buffer = NULL;
actor->length = length ? : pages * PAGE_SIZE;
actor->page = page;
actor->pages = pages;
actor->next_page = 0;
actor->returned_pages = 0;
actor->next_index = page[0]->index & ~((1 << (msblk->block_log - PAGE_SHIFT)) - 1);
actor->pageaddr = NULL;
actor->last_page = NULL;
actor->alloc_buffer = msblk->decompressor->alloc_buffer;
actor->squashfs_first_page = direct_first_page;
actor->squashfs_next_page = direct_next_page;
actor->squashfs_finish_page = direct_finish_page;


@@ -6,63 +6,38 @@
* Phillip Lougher <phillip@squashfs.org.uk>
*/
#ifndef CONFIG_SQUASHFS_FILE_DIRECT
struct squashfs_page_actor {
void **page;
int pages;
int length;
int next_page;
};
static inline struct squashfs_page_actor *squashfs_page_actor_init(void **page,
int pages, int length)
{
struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
if (actor == NULL)
return NULL;
actor->length = length ? : pages * PAGE_SIZE;
actor->page = page;
actor->pages = pages;
actor->next_page = 0;
return actor;
}
static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
{
actor->next_page = 1;
return actor->page[0];
}
static inline void *squashfs_next_page(struct squashfs_page_actor *actor)
{
return actor->next_page == actor->pages ? NULL :
actor->page[actor->next_page++];
}
static inline void squashfs_finish_page(struct squashfs_page_actor *actor)
{
/* empty */
}
#else
struct squashfs_page_actor {
union {
void **buffer;
struct page **page;
};
void *pageaddr;
void *tmp_buffer;
void *(*squashfs_first_page)(struct squashfs_page_actor *);
void *(*squashfs_next_page)(struct squashfs_page_actor *);
void (*squashfs_finish_page)(struct squashfs_page_actor *);
struct page *last_page;
int pages;
int length;
int next_page;
int alloc_buffer;
int returned_pages;
pgoff_t next_index;
};
extern struct squashfs_page_actor *squashfs_page_actor_init(void **, int, int);
extern struct squashfs_page_actor *squashfs_page_actor_init_special(struct page
**, int, int);
extern struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
int pages, int length);
extern struct squashfs_page_actor *squashfs_page_actor_init_special(
struct squashfs_sb_info *msblk,
struct page **page, int pages, int length);
static inline struct page *squashfs_page_actor_free(struct squashfs_page_actor *actor)
{
struct page *last_page = actor->last_page;
kfree(actor->tmp_buffer);
kfree(actor);
return last_page;
}
static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
{
return actor->squashfs_first_page(actor);
@@ -75,5 +50,8 @@ static inline void squashfs_finish_page(struct squashfs_page_actor *actor)
{
actor->squashfs_finish_page(actor);
}
#endif
static inline void squashfs_actor_nobuff(struct squashfs_page_actor *actor)
{
actor->alloc_buffer = 0;
}
#endif
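With these changes squashfs_first_page()/squashfs_next_page() may return ERR_PTR(-ENOMEM) for a page missing from the cache (or a temporary buffer when the decompressor sets alloc_buffer), so consumers must IS_ERR-check every address they write through, as the lz4/lzo/xz/zlib/zstd hunks elsewhere in this merge do. A hedged sketch of a consumer loop over the actor (fill_actor and src are illustrative names):

/* Sketch: copy decompressed data through a page actor, tolerating
 * missing pages that come back as ERR_PTR(). */
static int fill_actor(struct squashfs_page_actor *actor,
		      const void *src, int total)
{
	void *dst = squashfs_first_page(actor);
	int done = 0;

	while (done < total && dst) {
		int n = min_t(int, total - done, PAGE_SIZE);

		if (!IS_ERR(dst))		/* page absent: skip the copy */
			memcpy(dst, src + done, n);
		done += n;
		dst = squashfs_next_page(actor);/* NULL once pages run out */
	}
	squashfs_finish_page(actor);		/* unmap the last mapped page */
	return done;
}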


@@ -47,6 +47,7 @@ struct squashfs_sb_info {
struct squashfs_cache *block_cache;
struct squashfs_cache *fragment_cache;
struct squashfs_cache *read_page;
struct address_space *cache_mapping;
int next_meta_index;
__le64 *id_table;
__le64 *fragment_index;


@@ -257,6 +257,19 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
goto failed_mount;
}
if (msblk->devblksize == PAGE_SIZE) {
struct inode *cache = new_inode(sb);
if (cache == NULL)
goto failed_mount;
set_nlink(cache, 1);
cache->i_size = OFFSET_MAX;
mapping_set_gfp_mask(cache->i_mapping, GFP_NOFS);
msblk->cache_mapping = cache->i_mapping;
}
msblk->stream = squashfs_decompressor_setup(sb, flags);
if (IS_ERR(msblk->stream)) {
err = PTR_ERR(msblk->stream);
@@ -383,6 +396,8 @@ failed_mount:
squashfs_cache_delete(msblk->fragment_cache);
squashfs_cache_delete(msblk->read_page);
squashfs_decompressor_destroy(msblk);
if (msblk->cache_mapping)
iput(msblk->cache_mapping->host);
kfree(msblk->inode_lookup_table);
kfree(msblk->fragment_index);
kfree(msblk->id_table);
@@ -478,6 +493,8 @@ static void squashfs_put_super(struct super_block *sb)
squashfs_cache_delete(sbi->fragment_cache);
squashfs_cache_delete(sbi->read_page);
squashfs_decompressor_destroy(sbi);
if (sbi->cache_mapping)
iput(sbi->cache_mapping->host);
kfree(sbi->id_table);
kfree(sbi->fragment_index);
kfree(sbi->meta_index);
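The cache lives behind an otherwise-anonymous inode created in squashfs_fill_super() purely for its i_mapping, and is torn down with iput() on the mapping's host in both the failure path and put_super. A sketch of that construction, assuming only what the hunks above show:

/* Sketch: an inode used purely as a private page-cache container. */
static struct address_space *make_cache_mapping(struct super_block *sb)
{
	struct inode *cache = new_inode(sb);

	if (!cache)
		return NULL;
	set_nlink(cache, 1);		/* keep it off the orphan paths */
	cache->i_size = OFFSET_MAX;	/* indexable across the whole device */
	mapping_set_gfp_mask(cache->i_mapping, GFP_NOFS);
	return cache->i_mapping;	/* drop later via iput(mapping->host) */
}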


@@ -131,6 +131,10 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
stream->buf.out_pos = 0;
stream->buf.out_size = PAGE_SIZE;
stream->buf.out = squashfs_first_page(output);
if (IS_ERR(stream->buf.out)) {
error = PTR_ERR(stream->buf.out);
goto finish;
}
for (;;) {
enum xz_ret xz_err;
@@ -156,7 +160,10 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
if (stream->buf.out_pos == stream->buf.out_size) {
stream->buf.out = squashfs_next_page(output);
if (stream->buf.out != NULL) {
if (IS_ERR(stream->buf.out)) {
error = PTR_ERR(stream->buf.out);
break;
} else if (stream->buf.out != NULL) {
stream->buf.out_pos = 0;
total += PAGE_SIZE;
}
@@ -171,6 +178,7 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
}
}
finish:
squashfs_finish_page(output);
return error ? error : total + stream->buf.out_pos;
@@ -183,5 +191,6 @@ const struct squashfs_decompressor squashfs_xz_comp_ops = {
.decompress = squashfs_xz_uncompress,
.id = XZ_COMPRESSION,
.name = "xz",
.alloc_buffer = 1,
.supported = 1
};


@@ -62,6 +62,11 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
stream->next_out = squashfs_first_page(output);
stream->avail_in = 0;
if (IS_ERR(stream->next_out)) {
error = PTR_ERR(stream->next_out);
goto finish;
}
for (;;) {
int zlib_err;
@@ -85,7 +90,10 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
if (stream->avail_out == 0) {
stream->next_out = squashfs_next_page(output);
if (stream->next_out != NULL)
if (IS_ERR(stream->next_out)) {
error = PTR_ERR(stream->next_out);
break;
} else if (stream->next_out != NULL)
stream->avail_out = PAGE_SIZE;
}
@@ -107,6 +115,7 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
}
}
finish:
squashfs_finish_page(output);
if (!error)
@@ -122,6 +131,7 @@ const struct squashfs_decompressor squashfs_zlib_comp_ops = {
.decompress = zlib_uncompress,
.id = ZLIB_COMPRESSION,
.name = "zlib",
.alloc_buffer = 1,
.supported = 1
};


@@ -80,6 +80,10 @@ static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
out_buf.size = PAGE_SIZE;
out_buf.dst = squashfs_first_page(output);
if (IS_ERR(out_buf.dst)) {
error = PTR_ERR(out_buf.dst);
goto finish;
}
for (;;) {
size_t zstd_err;
@@ -104,7 +108,10 @@ static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
if (out_buf.pos == out_buf.size) {
out_buf.dst = squashfs_next_page(output);
if (out_buf.dst == NULL) {
if (IS_ERR(out_buf.dst)) {
error = PTR_ERR(out_buf.dst);
break;
} else if (out_buf.dst == NULL) {
/* Shouldn't run out of pages
* before stream is done.
*/
@@ -129,6 +136,8 @@ static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
}
}
finish:
squashfs_finish_page(output);
return error ? error : total_out;
@@ -140,5 +149,6 @@ const struct squashfs_decompressor squashfs_zstd_comp_ops = {
.decompress = zstd_uncompress,
.id = ZSTD_COMPRESSION,
.name = "zstd",
.alloc_buffer = 1,
.supported = 1
};


@@ -56,6 +56,10 @@ extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_wait_for_hotplug(void);
extern void inc_dl_tasks_cs(struct task_struct *task);
extern void dec_dl_tasks_cs(struct task_struct *task);
extern void cpuset_lock(void);
extern void cpuset_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
@@ -177,6 +181,11 @@ static inline void cpuset_update_active_cpus(void)
static inline void cpuset_wait_for_hotplug(void) { }
static inline void inc_dl_tasks_cs(struct task_struct *task) { }
static inline void dec_dl_tasks_cs(struct task_struct *task) { }
static inline void cpuset_lock(void) { }
static inline void cpuset_unlock(void) { }
static inline void cpuset_cpus_allowed(struct task_struct *p,
struct cpumask *mask)
{


@@ -1829,7 +1829,9 @@ current_restore_flags(unsigned long orig_flags, unsigned long flags)
}
extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effective_cpus);
extern int task_can_attach(struct task_struct *p);
extern int dl_bw_alloc(int cpu, u64 dl_bw);
extern void dl_bw_free(int cpu, u64 dl_bw);
#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
extern bool cpupri_check_rt(void);


@@ -343,8 +343,6 @@ enum kmalloc_cache_type {
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];
extern bool android_kmalloc_64_create;
/*
* Define gfp bits that should not be set for KMALLOC_NORMAL.
*/
@@ -396,9 +394,6 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
if (!size)
return 0;
if (android_kmalloc_64_create && size <= 64)
return 6;
if (size <= KMALLOC_MIN_SIZE)
return KMALLOC_SHIFT_LOW;


@@ -624,6 +624,7 @@ void snd_pcm_stream_unlock(struct snd_pcm_substream *substream);
void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream);
void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream);
unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream);
unsigned long _snd_pcm_stream_lock_irqsave_nested(struct snd_pcm_substream *substream);
/**
* snd_pcm_stream_lock_irqsave - Lock the PCM stream
@@ -642,6 +643,20 @@ unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream);
void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
unsigned long flags);
/**
* snd_pcm_stream_lock_irqsave_nested - Single-nested PCM stream locking
* @substream: PCM substream
* @flags: irq flags
*
* This locks the PCM stream like snd_pcm_stream_lock_irqsave() but with
* the single-depth lockdep subclass.
*/
#define snd_pcm_stream_lock_irqsave_nested(substream, flags) \
do { \
typecheck(unsigned long, flags); \
flags = _snd_pcm_stream_lock_irqsave_nested(substream); \
} while (0)
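The nested variant takes the stream lock with the single-depth lockdep subclass, which is what lets DPCM hold a front-end stream lock and a back-end stream lock at the same time without a false lockdep report. A hedged usage sketch (fe and be are hypothetical linked substreams):

unsigned long fe_flags, be_flags;

snd_pcm_stream_lock_irqsave(fe, fe_flags);		/* outer FE lock */
snd_pcm_stream_lock_irqsave_nested(be, be_flags);	/* inner BE lock */
/* ... trigger the BE while both streams are quiesced ... */
snd_pcm_stream_unlock_irqrestore(be, be_flags);
snd_pcm_stream_unlock_irqrestore(fe, fe_flags);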
/**
* snd_pcm_group_for_each_entry - iterate over the linked substreams
* @s: the iterator


@@ -907,7 +907,11 @@ struct snd_soc_card {
struct mutex pcm_mutex;
enum snd_soc_pcm_subclass pcm_subclass;
#ifdef __GENKSYMS__
spinlock_t dpcm_lock;
#else
spinlock_t unused;
#endif
int (*probe)(struct snd_soc_card *card);
int (*late_probe)(struct snd_soc_card *card);
@@ -1094,7 +1098,17 @@ struct snd_soc_pcm_runtime {
int num_components;
ANDROID_KABI_RESERVE(1);
/* Android KABI preservation.
*
* dpcm_be_start[2] is the backport version of be_start from
* 848aedfdc6ba ("ASoC: soc-pcm: test refcount before triggering")
* which is originally in struct snd_soc_dpcm_runtime. Since we don't
* have ABI reserve fields there, we are adding this refcount variable
* here as an array for each BE stream.
*
* refcount protected by BE stream pcm lock
*/
ANDROID_KABI_USE(1, u32 dpcm_be_start[2]);
struct snd_soc_component *components[]; /* CPU/Codec/Platform */
};


@@ -86,6 +86,31 @@ DECLARE_HOOK(android_vh_cma_drain_all_pages_bypass,
DECLARE_HOOK(android_vh_pcplist_add_cma_pages_bypass,
TP_PROTO(int migratetype, bool *bypass),
TP_ARGS(migratetype, bypass));
DECLARE_HOOK(android_vh_free_unref_page_bypass,
TP_PROTO(struct page *page, int order, int migratetype, bool *bypass),
TP_ARGS(page, order, migratetype, bypass));
DECLARE_HOOK(android_vh_kvmalloc_node_use_vmalloc,
TP_PROTO(size_t size, gfp_t *kmalloc_flags, bool *use_vmalloc),
TP_ARGS(size, kmalloc_flags, use_vmalloc));
DECLARE_HOOK(android_vh_should_alloc_pages_retry,
TP_PROTO(gfp_t gfp_mask, int order, int *alloc_flags,
int migratetype, struct zone *preferred_zone, struct page **page, bool *should_alloc_retry),
TP_ARGS(gfp_mask, order, alloc_flags,
migratetype, preferred_zone, page, should_alloc_retry));
DECLARE_HOOK(android_vh_unreserve_highatomic_bypass,
TP_PROTO(bool force, struct zone *zone, bool *skip_unreserve_highatomic),
TP_ARGS(force, zone, skip_unreserve_highatomic));
DECLARE_HOOK(android_vh_rmqueue_bulk_bypass,
TP_PROTO(unsigned int order, struct per_cpu_pages *pcp, int migratetype,
struct list_head *list),
TP_ARGS(order, pcp, migratetype, list));
DECLARE_HOOK(android_vh_ra_tuning_max_page,
TP_PROTO(struct readahead_control *ractl, unsigned long *max_page),
TP_ARGS(ractl, max_page));
DECLARE_HOOK(android_vh_tune_mmap_readaround,
TP_PROTO(unsigned int ra_pages, pgoff_t pgoff,
pgoff_t *start, unsigned int *size, unsigned int *async_size),
TP_ARGS(ra_pages, pgoff, start, size, async_size));
DECLARE_HOOK(android_vh_mmap_region,
TP_PROTO(struct vm_area_struct *vma, unsigned long addr),
TP_ARGS(vma, addr));
@@ -177,6 +202,33 @@ DECLARE_HOOK(android_vh_free_one_page_bypass,
TP_PROTO(struct page *page, struct zone *zone, int order, int migratetype,
int fpi_flags, bool *bypass),
TP_ARGS(page, zone, order, migratetype, fpi_flags, bypass));
DECLARE_HOOK(android_vh_use_cma_first_check,
TP_PROTO(bool *use_cma_first_check),
TP_ARGS(use_cma_first_check));
DECLARE_HOOK(android_vh_alloc_highpage_movable_gfp_adjust,
TP_PROTO(gfp_t *gfp_mask),
TP_ARGS(gfp_mask));
DECLARE_HOOK(android_vh_anon_gfp_adjust,
TP_PROTO(gfp_t *gfp_mask),
TP_ARGS(gfp_mask));
DECLARE_HOOK(android_vh_slab_page_alloced,
TP_PROTO(struct page *page, size_t size, gfp_t flags),
TP_ARGS(page, size, flags));
DECLARE_HOOK(android_vh_kmalloc_order_alloced,
TP_PROTO(struct page *page, size_t size, gfp_t flags),
TP_ARGS(page, size, flags));
DECLARE_HOOK(android_vh_compact_finished,
TP_PROTO(bool *abort_compact),
TP_ARGS(abort_compact));
DECLARE_HOOK(android_vh_madvise_cold_or_pageout_abort,
TP_PROTO(struct vm_area_struct *vma, bool *abort_madvise),
TP_ARGS(vma, abort_madvise));
DECLARE_HOOK(android_vh_alloc_flags_cma_adjust,
TP_PROTO(gfp_t gfp_mask, unsigned int *alloc_flags),
TP_ARGS(gfp_mask, alloc_flags));
DECLARE_HOOK(android_vh_rmqueue_cma_fallback,
TP_PROTO(struct zone *zone, unsigned int order, struct page **page),
TP_ARGS(zone, order, page));
#endif /* _TRACE_HOOK_MM_H */
/* This part must be outside protection */
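Each DECLARE_HOOK() above defines a restricted vendor tracepoint; a vendor module attaches a probe with the generated register_trace_android_vh_<name>() helper. A hedged sketch for the compaction hook (the probe body and policy check are made up):

/* Sketch: vendor probe for android_vh_compact_finished. */
static void vh_compact_finished(void *data, bool *abort_compact)
{
	/* hypothetical policy: stop compaction under memory pressure */
	*abort_compact = vendor_should_abort_compaction();
}

static int __init vendor_mm_hooks_init(void)
{
	return register_trace_android_vh_compact_finished(
			vh_compact_finished, NULL);
}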


@@ -191,6 +191,7 @@ config GKI_HIDDEN_NET_CONFIGS
bool "Hidden networking configuration needed for GKI"
select PAGE_POOL
select NET_PTP_CLASSIFY
select NET_DEVLINK
help
Dummy config option used to enable the networking hidden
config, required by various SoC platforms.


@@ -56,6 +56,7 @@
#include <linux/file.h>
#include <linux/fs_parser.h>
#include <linux/sched/cputime.h>
#include <linux/sched/deadline.h>
#include <linux/psi.h>
#include <net/sock.h>
@@ -4323,6 +4324,7 @@ int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
cft->flags |= __CFTYPE_ONLY_ON_DFL;
return cgroup_add_cftypes(ss, cfts);
}
EXPORT_SYMBOL_GPL(cgroup_add_dfl_cftypes);
/**
* cgroup_add_legacy_cftypes - add an array of cftypes for legacy hierarchies
@@ -6480,6 +6482,9 @@ void cgroup_exit(struct task_struct *tsk)
list_add_tail(&tsk->cg_list, &cset->dying_tasks);
cset->nr_tasks--;
if (dl_task(tsk))
dec_dl_tasks_cs(tsk);
WARN_ON_ONCE(cgroup_task_frozen(tsk));
if (unlikely(!(tsk->flags & PF_KTHREAD) &&
test_bit(CGRP_FREEZE, &task_dfl_cgroup(tsk)->flags)))


@@ -166,6 +166,14 @@ struct cpuset {
int use_parent_ecpus;
int child_ecpus_count;
/*
* number of SCHED_DEADLINE tasks attached to this cpuset, so that we
* know when to rebuild associated root domain bandwidth information.
*/
int nr_deadline_tasks;
int nr_migrate_dl_tasks;
u64 sum_migrate_dl_bw;
/* Handle for cpuset.cpus.partition */
struct cgroup_file partition_file;
};
@@ -213,6 +221,20 @@ static inline struct cpuset *parent_cs(struct cpuset *cs)
return css_cs(cs->css.parent);
}
void inc_dl_tasks_cs(struct task_struct *p)
{
struct cpuset *cs = task_cs(p);
cs->nr_deadline_tasks++;
}
void dec_dl_tasks_cs(struct task_struct *p)
{
struct cpuset *cs = task_cs(p);
cs->nr_deadline_tasks--;
}
/* bits in struct cpuset flags field */
typedef enum {
CS_ONLINE,
@@ -316,22 +338,23 @@ static struct cpuset top_cpuset = {
if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
/*
* There are two global locks guarding cpuset structures - cpuset_rwsem and
* There are two global locks guarding cpuset structures - cpuset_mutex and
* callback_lock. We also require taking task_lock() when dereferencing a
* task's cpuset pointer. See "The task_lock() exception", at the end of this
* comment. The cpuset code uses only cpuset_rwsem write lock. Other
* kernel subsystems can use cpuset_read_lock()/cpuset_read_unlock() to
* prevent change to cpuset structures.
* comment. The cpuset code uses only cpuset_mutex. Other kernel subsystems
* can use cpuset_lock()/cpuset_unlock() to prevent change to cpuset
* structures. Note that cpuset_mutex needs to be a mutex as it is used in
* paths that rely on priority inheritance (e.g. scheduler - on RT) for
* correctness.
*
* A task must hold both locks to modify cpusets. If a task holds
* cpuset_rwsem, it blocks others wanting that rwsem, ensuring that it
* is the only task able to also acquire callback_lock and be able to
* modify cpusets. It can perform various checks on the cpuset structure
* first, knowing nothing will change. It can also allocate memory while
* just holding cpuset_rwsem. While it is performing these checks, various
* callback routines can briefly acquire callback_lock to query cpusets.
* Once it is ready to make the changes, it takes callback_lock, blocking
* everyone else.
* cpuset_mutex, it blocks others, ensuring that it is the only task able to
* also acquire callback_lock and be able to modify cpusets. It can perform
* various checks on the cpuset structure first, knowing nothing will change.
* It can also allocate memory while just holding cpuset_mutex. While it is
* performing these checks, various callback routines can briefly acquire
* callback_lock to query cpusets. Once it is ready to make the changes, it
* takes callback_lock, blocking everyone else.
*
* Calls to the kernel memory allocator can not be made while holding
* callback_lock, as that would risk double tripping on callback_lock
@@ -353,7 +376,18 @@ static struct cpuset top_cpuset = {
* guidelines for accessing subsystem state in kernel/cgroup.c
*/
DEFINE_STATIC_PERCPU_RWSEM(cpuset_rwsem);
static DEFINE_MUTEX(cpuset_mutex);
void cpuset_lock(void)
{
mutex_lock(&cpuset_mutex);
}
void cpuset_unlock(void)
{
mutex_unlock(&cpuset_mutex);
}
static DEFINE_SPINLOCK(callback_lock);
static struct workqueue_struct *cpuset_migrate_mm_wq;
@@ -389,7 +423,7 @@ static inline bool is_in_v2_mode(void)
* One way or another, we guarantee to return some non-empty subset
* of cpu_online_mask.
*
* Call with callback_lock or cpuset_rwsem held.
* Call with callback_lock or cpuset_mutex held.
*/
static void guarantee_online_cpus(struct task_struct *tsk,
struct cpumask *pmask)
@@ -431,7 +465,7 @@ out_unlock:
* One way or another, we guarantee to return some non-empty subset
* of node_states[N_MEMORY].
*
* Call with callback_lock or cpuset_rwsem held.
* Call with callback_lock or cpuset_mutex held.
*/
static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
{
@@ -443,7 +477,8 @@ static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
/*
* update task's spread flag if cpuset's page/slab spread flag is set
*
* Call with callback_lock or cpuset_rwsem held.
* Call with callback_lock or cpuset_mutex held. The check can be skipped
* if on default hierarchy.
*/
static void cpuset_update_task_spread_flag(struct cpuset *cs,
struct task_struct *tsk)
@@ -464,7 +499,7 @@ static void cpuset_update_task_spread_flag(struct cpuset *cs,
*
* One cpuset is a subset of another if all its allowed CPUs and
* Memory Nodes are a subset of the other, and its exclusive flags
* are only set if the other's are set. Call holding cpuset_rwsem.
* are only set if the other's are set. Call holding cpuset_mutex.
*/
static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
@@ -580,7 +615,7 @@ static inline void free_cpuset(struct cpuset *cs)
* If we replaced the flag and mask values of the current cpuset
* (cur) with those values in the trial cpuset (trial), would
* our various subset and exclusive rules still be valid? Presumes
* cpuset_rwsem held.
* cpuset_mutex held.
*
* 'cur' is the address of an actual, in-use cpuset. Operations
* such as list traversal that depend on the actual address of the
@@ -703,7 +738,7 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr,
rcu_read_unlock();
}
/* Must be called with cpuset_rwsem held. */
/* Must be called with cpuset_mutex held. */
static inline int nr_cpusets(void)
{
/* jump label reference count + the top-level cpuset */
@@ -729,7 +764,7 @@ static inline int nr_cpusets(void)
* domains when operating in the severe memory shortage situations
* that could cause allocation failures below.
*
* Must be called with cpuset_rwsem held.
* Must be called with cpuset_mutex held.
*
* The three key local variables below are:
* cp - cpuset pointer, used (together with pos_css) to perform a
@@ -940,11 +975,14 @@ done:
return ndoms;
}
static void update_tasks_root_domain(struct cpuset *cs)
static void dl_update_tasks_root_domain(struct cpuset *cs)
{
struct css_task_iter it;
struct task_struct *task;
if (cs->nr_deadline_tasks == 0)
return;
css_task_iter_start(&cs->css, 0, &it);
while ((task = css_task_iter_next(&it)))
@@ -953,12 +991,12 @@ static void update_tasks_root_domain(struct cpuset *cs)
css_task_iter_end(&it);
}
static void rebuild_root_domains(void)
static void dl_rebuild_rd_accounting(void)
{
struct cpuset *cs = NULL;
struct cgroup_subsys_state *pos_css;
percpu_rwsem_assert_held(&cpuset_rwsem);
lockdep_assert_held(&cpuset_mutex);
lockdep_assert_cpus_held();
lockdep_assert_held(&sched_domains_mutex);
@@ -981,7 +1019,7 @@ static void rebuild_root_domains(void)
rcu_read_unlock();
update_tasks_root_domain(cs);
dl_update_tasks_root_domain(cs);
rcu_read_lock();
css_put(&cs->css);
@@ -995,7 +1033,7 @@ partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
{
mutex_lock(&sched_domains_mutex);
partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
rebuild_root_domains();
dl_rebuild_rd_accounting();
mutex_unlock(&sched_domains_mutex);
}
@@ -1008,7 +1046,7 @@ partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
* 'cpus' is removed, then call this routine to rebuild the
* scheduler's dynamic sched domains.
*
* Call with cpuset_rwsem held. Takes cpus_read_lock().
* Call with cpuset_mutex held. Takes cpus_read_lock().
*/
static void rebuild_sched_domains_locked(void)
{
@@ -1019,7 +1057,7 @@ static void rebuild_sched_domains_locked(void)
int ndoms;
lockdep_assert_cpus_held();
percpu_rwsem_assert_held(&cpuset_rwsem);
lockdep_assert_held(&cpuset_mutex);
/*
* If we have raced with CPU hotplug, return early to avoid
@@ -1070,9 +1108,9 @@ static void rebuild_sched_domains_locked(void)
void rebuild_sched_domains(void)
{
cpus_read_lock();
percpu_down_write(&cpuset_rwsem);
mutex_lock(&cpuset_mutex);
rebuild_sched_domains_locked();
percpu_up_write(&cpuset_rwsem);
mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(rebuild_sched_domains);
@@ -1094,7 +1132,7 @@ static int update_cpus_allowed(struct cpuset *cs, struct task_struct *p,
* @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
*
* Iterate through each task of @cs updating its cpus_allowed to the
* effective cpuset's. As this function is called with cpuset_rwsem held,
* effective cpuset's. As this function is called with cpuset_mutex held,
* cpuset membership stays stable.
*/
static void update_tasks_cpumask(struct cpuset *cs)
@@ -1201,7 +1239,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
int old_prs, new_prs;
bool part_error = false; /* Partition error? */
percpu_rwsem_assert_held(&cpuset_rwsem);
lockdep_assert_held(&cpuset_mutex);
/*
* The parent must be a partition root.
@@ -1371,7 +1409,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
*
* On legacy hierarchy, effective_cpus will be the same with cpu_allowed.
*
* Called with cpuset_rwsem held
* Called with cpuset_mutex held
*/
static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
{
@@ -1534,7 +1572,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
struct cpuset *sibling;
struct cgroup_subsys_state *pos_css;
percpu_rwsem_assert_held(&cpuset_rwsem);
lockdep_assert_held(&cpuset_mutex);
/*
* Check all its siblings and call update_cpumasks_hier()
@@ -1739,12 +1777,12 @@ static void *cpuset_being_rebound;
* @cs: the cpuset in which each task's mems_allowed mask needs to be changed
*
* Iterate through each task of @cs updating its mems_allowed to the
* effective cpuset's. As this function is called with cpuset_rwsem held,
* effective cpuset's. As this function is called with cpuset_mutex held,
* cpuset membership stays stable.
*/
static void update_tasks_nodemask(struct cpuset *cs)
{
static nodemask_t newmems; /* protected by cpuset_rwsem */
static nodemask_t newmems; /* protected by cpuset_mutex */
struct css_task_iter it;
struct task_struct *task;
@@ -1757,7 +1795,7 @@ static void update_tasks_nodemask(struct cpuset *cs)
* take while holding tasklist_lock. Forks can happen - the
* mpol_dup() cpuset_being_rebound check will catch such forks,
* and rebind their vma mempolicies too. Because we still hold
* the global cpuset_rwsem, we know that no other rebind effort
* the global cpuset_mutex, we know that no other rebind effort
* will be contending for the global variable cpuset_being_rebound.
* It's ok if we rebind the same mm twice; mpol_rebind_mm()
* is idempotent. Also migrate pages in each mm to new nodes.
@@ -1803,7 +1841,7 @@ static void update_tasks_nodemask(struct cpuset *cs)
*
* On legacy hierarchy, effective_mems will be the same with mems_allowed.
*
* Called with cpuset_rwsem held
* Called with cpuset_mutex held
*/
static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
{
@@ -1856,7 +1894,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
* mempolicies and if the cpuset is marked 'memory_migrate',
* migrate the tasks pages to the new memory.
*
* Call with cpuset_rwsem held. May take callback_lock during call.
* Call with cpuset_mutex held. May take callback_lock during call.
* Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
* lock each such tasks mm->mmap_lock, scan its vma's and rebind
* their mempolicies to the cpusets new mems_allowed.
@@ -1946,7 +1984,7 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
* @cs: the cpuset in which each task's spread flags needs to be changed
*
* Iterate through each task of @cs updating its spread flags. As this
* function is called with cpuset_rwsem held, cpuset membership stays
* function is called with cpuset_mutex held, cpuset membership stays
* stable.
*/
static void update_tasks_flags(struct cpuset *cs)
@@ -1966,7 +2004,7 @@ static void update_tasks_flags(struct cpuset *cs)
* cs: the cpuset to update
* turning_on: whether the flag is being set or cleared
*
* Call with cpuset_rwsem held.
* Call with cpuset_mutex held.
*/
static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
@@ -2015,7 +2053,7 @@ out:
* cs: the cpuset to update
* new_prs: new partition root state
*
* Call with cpuset_rwsem held.
* Call with cpuset_mutex held.
*/
static int update_prstate(struct cpuset *cs, int new_prs)
{
@@ -2197,19 +2235,26 @@ static int fmeter_getrate(struct fmeter *fmp)
static struct cpuset *cpuset_attach_old_cs;
/* Called by cgroups to determine if a cpuset is usable; cpuset_rwsem held */
static void reset_migrate_dl_data(struct cpuset *cs)
{
cs->nr_migrate_dl_tasks = 0;
cs->sum_migrate_dl_bw = 0;
}
/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
static int cpuset_can_attach(struct cgroup_taskset *tset)
{
struct cgroup_subsys_state *css;
struct cpuset *cs;
struct cpuset *cs, *oldcs;
struct task_struct *task;
int ret;
/* used later by cpuset_attach() */
cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
oldcs = cpuset_attach_old_cs;
cs = css_cs(css);
percpu_down_write(&cpuset_rwsem);
mutex_lock(&cpuset_mutex);
/* allow moving tasks into an empty cpuset if on default hierarchy */
ret = -ENOSPC;
@@ -2218,14 +2263,39 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
goto out_unlock;
cgroup_taskset_for_each(task, css, tset) {
ret = task_can_attach(task, cs->effective_cpus);
ret = task_can_attach(task);
if (ret)
goto out_unlock;
ret = security_task_setscheduler(task);
if (ret)
goto out_unlock;
if (dl_task(task)) {
cs->nr_migrate_dl_tasks++;
cs->sum_migrate_dl_bw += task->dl.dl_bw;
}
}
if (!cs->nr_migrate_dl_tasks)
goto out_success;
if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) {
int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus);
if (unlikely(cpu >= nr_cpu_ids)) {
reset_migrate_dl_data(cs);
ret = -EINVAL;
goto out_unlock;
}
ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw);
if (ret) {
reset_migrate_dl_data(cs);
goto out_unlock;
}
}
out_success:
/*
* Mark attach is in progress. This makes validate_change() fail
* changes which zero cpus/mems_allowed.
@@ -2233,7 +2303,7 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
cs->attach_in_progress++;
ret = 0;
out_unlock:
percpu_up_write(&cpuset_rwsem);
mutex_unlock(&cpuset_mutex);
return ret;
}
@@ -2245,15 +2315,23 @@ static void cpuset_cancel_attach(struct cgroup_taskset *tset)
cgroup_taskset_first(tset, &css);
cs = css_cs(css);
percpu_down_write(&cpuset_rwsem);
mutex_lock(&cpuset_mutex);
cs->attach_in_progress--;
if (!cs->attach_in_progress)
wake_up(&cpuset_attach_wq);
percpu_up_write(&cpuset_rwsem);
if (cs->nr_migrate_dl_tasks) {
int cpu = cpumask_any(cs->effective_cpus);
dl_bw_free(cpu, cs->sum_migrate_dl_bw);
reset_migrate_dl_data(cs);
}
mutex_unlock(&cpuset_mutex);
}
/*
* Protected by cpuset_rwsem. cpus_attach is used only by cpuset_attach()
* Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach()
* but we can't allocate it dynamically there. Define it global and
* allocate from cpuset_init().
*/
@@ -2261,7 +2339,7 @@ static cpumask_var_t cpus_attach;
static void cpuset_attach(struct cgroup_taskset *tset)
{
/* static buf protected by cpuset_rwsem */
/* static buf protected by cpuset_mutex */
static nodemask_t cpuset_attach_nodemask_to;
struct task_struct *task;
struct task_struct *leader;
@@ -2273,7 +2351,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
cs = css_cs(css);
lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */
percpu_down_write(&cpuset_rwsem);
mutex_lock(&cpuset_mutex);
guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
@@ -2321,11 +2399,17 @@ static void cpuset_attach(struct cgroup_taskset *tset)
cs->old_mems_allowed = cpuset_attach_nodemask_to;
if (cs->nr_migrate_dl_tasks) {
cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks;
oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks;
reset_migrate_dl_data(cs);
}
cs->attach_in_progress--;
if (!cs->attach_in_progress)
wake_up(&cpuset_attach_wq);
percpu_up_write(&cpuset_rwsem);
mutex_unlock(&cpuset_mutex);
}
/* The various types of files and directories in a cpuset file system */
@@ -2357,7 +2441,7 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
int retval = 0;
cpus_read_lock();
percpu_down_write(&cpuset_rwsem);
mutex_lock(&cpuset_mutex);
if (!is_cpuset_online(cs)) {
retval = -ENODEV;
goto out_unlock;
@@ -2393,7 +2477,7 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
break;
}
out_unlock:
percpu_up_write(&cpuset_rwsem);
mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
return retval;
}
@@ -2406,7 +2490,7 @@ static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
int retval = -ENODEV;
cpus_read_lock();
percpu_down_write(&cpuset_rwsem);
mutex_lock(&cpuset_mutex);
if (!is_cpuset_online(cs))
goto out_unlock;
@@ -2419,7 +2503,7 @@ static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
break;
}
out_unlock:
percpu_up_write(&cpuset_rwsem);
mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
return retval;
}
@@ -2452,7 +2536,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
* operation like this one can lead to a deadlock through kernfs
* active_ref protection. Let's break the protection. Losing the
* protection is okay as we check whether @cs is online after
* grabbing cpuset_rwsem anyway. This only happens on the legacy
* grabbing cpuset_mutex anyway. This only happens on the legacy
* hierarchies.
*/
css_get(&cs->css);
@@ -2460,7 +2544,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
flush_work(&cpuset_hotplug_work);
cpus_read_lock();
percpu_down_write(&cpuset_rwsem);
mutex_lock(&cpuset_mutex);
if (!is_cpuset_online(cs))
goto out_unlock;
@@ -2484,7 +2568,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
free_cpuset(trialcs);
out_unlock:
percpu_up_write(&cpuset_rwsem);
mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
kernfs_unbreak_active_protection(of->kn);
css_put(&cs->css);
@@ -2617,13 +2701,13 @@ static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf,
css_get(&cs->css);
cpus_read_lock();
percpu_down_write(&cpuset_rwsem);
mutex_lock(&cpuset_mutex);
if (!is_cpuset_online(cs))
goto out_unlock;
retval = update_prstate(cs, val);
out_unlock:
percpu_up_write(&cpuset_rwsem);
mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
css_put(&cs->css);
return retval ?: nbytes;
@@ -2836,7 +2920,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
return 0;
cpus_read_lock();
percpu_down_write(&cpuset_rwsem);
mutex_lock(&cpuset_mutex);
set_bit(CS_ONLINE, &cs->flags);
if (is_spread_page(parent))
@@ -2888,7 +2972,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
spin_unlock_irq(&callback_lock);
out_unlock:
percpu_up_write(&cpuset_rwsem);
mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
return 0;
}
@@ -2909,7 +2993,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
struct cpuset *cs = css_cs(css);
cpus_read_lock();
percpu_down_write(&cpuset_rwsem);
mutex_lock(&cpuset_mutex);
if (is_partition_root(cs))
update_prstate(cs, 0);
@@ -2928,7 +3012,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
cpuset_dec();
clear_bit(CS_ONLINE, &cs->flags);
percpu_up_write(&cpuset_rwsem);
mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
}
@@ -2941,7 +3025,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
static void cpuset_bind(struct cgroup_subsys_state *root_css)
{
percpu_down_write(&cpuset_rwsem);
mutex_lock(&cpuset_mutex);
spin_lock_irq(&callback_lock);
if (is_in_v2_mode()) {
@@ -2954,7 +3038,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
}
spin_unlock_irq(&callback_lock);
percpu_up_write(&cpuset_rwsem);
mutex_unlock(&cpuset_mutex);
}
/*
@@ -2999,8 +3083,6 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
int __init cpuset_init(void)
{
BUG_ON(percpu_init_rwsem(&cpuset_rwsem));
BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL));
@@ -3074,7 +3156,7 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
is_empty = cpumask_empty(cs->cpus_allowed) ||
nodes_empty(cs->mems_allowed);
percpu_up_write(&cpuset_rwsem);
mutex_unlock(&cpuset_mutex);
/*
* Move tasks to the nearest ancestor with execution resources,
@@ -3084,7 +3166,7 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
if (is_empty)
remove_tasks_in_empty_cpuset(cs);
percpu_down_write(&cpuset_rwsem);
mutex_lock(&cpuset_mutex);
}
static void
@@ -3134,14 +3216,14 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
retry:
wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
percpu_down_write(&cpuset_rwsem);
mutex_lock(&cpuset_mutex);
/*
* We have raced with task attaching. We wait until attaching
* is finished, so we won't attach a task to an empty cpuset.
*/
if (cs->attach_in_progress) {
percpu_up_write(&cpuset_rwsem);
mutex_unlock(&cpuset_mutex);
goto retry;
}
@@ -3219,7 +3301,7 @@ update_tasks:
hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems,
cpus_updated, mems_updated);
percpu_up_write(&cpuset_rwsem);
mutex_unlock(&cpuset_mutex);
}
/**
@@ -3249,7 +3331,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
if (on_dfl && !alloc_cpumasks(NULL, &tmp))
ptmp = &tmp;
percpu_down_write(&cpuset_rwsem);
mutex_lock(&cpuset_mutex);
/* fetch the available cpus/mems and find out which changed how */
cpumask_copy(&new_cpus, cpu_active_mask);
@@ -3306,7 +3388,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
update_tasks_nodemask(&top_cpuset);
}
percpu_up_write(&cpuset_rwsem);
mutex_unlock(&cpuset_mutex);
/* if cpus or mems changed, we need to propagate to descendants */
if (cpus_updated || mems_updated) {
@@ -3739,7 +3821,7 @@ void __cpuset_memory_pressure_bump(void)
* - Used for /proc/<pid>/cpuset.
* - No need to task_lock(tsk) on this tsk->cpuset reference, as it
* doesn't really matter if tsk->cpuset changes after we read it,
* and we take cpuset_rwsem, keeping cpuset_attach() from changing it
* and we take cpuset_mutex, keeping cpuset_attach() from changing it
* anyway.
*/
int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,


@@ -1018,6 +1018,8 @@ queue:
raw_spin_unlock_irq(&sem->wait_lock);
rwsem_set_reader_owned(sem);
lockevent_inc(rwsem_rlock_fast);
trace_android_vh_record_rwsem_lock_starttime(
current, jiffies);
return sem;
}
adjustment += RWSEM_FLAG_WAITERS;


@@ -7451,6 +7451,7 @@ static int __sched_setscheduler(struct task_struct *p,
int reset_on_fork;
int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
struct rq *rq;
bool cpuset_locked = false;
/* The pi code expects interrupts enabled */
BUG_ON(pi && in_interrupt());
@@ -7551,6 +7552,15 @@ recheck:
return retval;
}
/*
* SCHED_DEADLINE bandwidth accounting relies on stable cpusets
* information.
*/
if (dl_policy(policy) || dl_policy(p->policy)) {
cpuset_locked = true;
cpuset_lock();
}
/*
* Make sure no PI-waiters arrive (or leave) while we are
* changing the priority of the task:
@@ -7625,6 +7635,8 @@ change:
if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
policy = oldpolicy = -1;
task_rq_unlock(rq, p, &rf);
if (cpuset_locked)
cpuset_unlock();
goto recheck;
}
@@ -7690,8 +7702,11 @@ change:
head = splice_balance_callbacks(rq);
task_rq_unlock(rq, p, &rf);
if (pi)
if (pi) {
if (cpuset_locked)
cpuset_unlock();
rt_mutex_adjust_pi(p);
}
/* Run balance callbacks after we've adjusted the PI chain: */
balance_callbacks(rq, head);
@@ -7701,6 +7716,8 @@ change:
unlock:
task_rq_unlock(rq, p, &rf);
if (cpuset_locked)
cpuset_unlock();
return retval;
}
@@ -8926,8 +8943,7 @@ int cpuset_cpumask_can_shrink(const struct cpumask *cur,
return ret;
}
int task_can_attach(struct task_struct *p,
const struct cpumask *cs_effective_cpus)
int task_can_attach(struct task_struct *p)
{
int ret = 0;
@@ -8940,21 +8956,9 @@ int task_can_attach(struct task_struct *p,
* success of set_cpus_allowed_ptr() on all attached tasks
* before cpus_mask may be changed.
*/
if (p->flags & PF_NO_SETAFFINITY) {
if (p->flags & PF_NO_SETAFFINITY)
ret = -EINVAL;
goto out;
}
if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
cs_effective_cpus)) {
int cpu = cpumask_any_and(cpu_active_mask, cs_effective_cpus);
if (unlikely(cpu >= nr_cpu_ids))
return -EINVAL;
ret = dl_cpu_busy(cpu, p);
}
out:
return ret;
}
@@ -9254,7 +9258,7 @@ static void cpuset_cpu_active(void)
static int cpuset_cpu_inactive(unsigned int cpu)
{
if (!cpuhp_tasks_frozen) {
int ret = dl_cpu_busy(cpu, NULL);
int ret = dl_bw_check_overflow(cpu);
if (ret)
return ret;
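Net effect of the core.c changes: __sched_setscheduler() now brackets any transition into or out of SCHED_DEADLINE with cpuset_lock()/cpuset_unlock(), and the deadline-bandwidth admission test moves out of task_can_attach() into the cpuset attach path (via dl_bw_alloc()), with dl_bw_check_overflow() covering the hotplug case. The lock pairing, reduced to its skeleton:

/* Sketch: cpuset_mutex is taken before the rq/pi locks and released
 * after them on every exit path (recheck, pi, and the common unlock). */
bool cpuset_locked = false;

if (dl_policy(policy) || dl_policy(p->policy)) {
	cpuset_locked = true;
	cpuset_lock();			/* stabilize cpuset dl accounting */
}
/* ... take the rq lock, change the policy, drop the rq lock ... */
if (cpuset_locked)
	cpuset_unlock();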


@@ -18,6 +18,7 @@
#include "sched.h"
#include "pelt.h"
#include <trace/hooks/sched.h>
#include <linux/cpuset.h>
struct dl_bandwidth def_dl_bandwidth;
@@ -2453,6 +2454,12 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
if (task_on_rq_queued(p) && p->dl.dl_runtime)
task_non_contending(p);
/*
* In case a task is setscheduled out from SCHED_DEADLINE we need to
* keep track of that on its cpuset (for correct bandwidth tracking).
*/
dec_dl_tasks_cs(p);
if (!task_on_rq_queued(p)) {
/*
* Inactive timer is armed. However, p is leaving DEADLINE and
@@ -2493,6 +2500,12 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
put_task_struct(p);
/*
* In case a task is setscheduled to SCHED_DEADLINE we need to keep
* track of that on its cpuset (for correct bandwidth tracking).
*/
inc_dl_tasks_cs(p);
/* If p is not queued we will update its parameters at next wakeup. */
if (!task_on_rq_queued(p)) {
add_rq_bw(&p->dl, &rq->dl);
@@ -2892,26 +2905,38 @@ int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
return ret;
}
int dl_cpu_busy(int cpu, struct task_struct *p)
enum dl_bw_request {
dl_bw_req_check_overflow = 0,
dl_bw_req_alloc,
dl_bw_req_free
};
static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
{
unsigned long flags, cap;
unsigned long flags;
struct dl_bw *dl_b;
bool overflow;
bool overflow = 0;
rcu_read_lock_sched();
dl_b = dl_bw_of(cpu);
raw_spin_lock_irqsave(&dl_b->lock, flags);
cap = dl_bw_capacity(cpu);
overflow = __dl_overflow(dl_b, cap, 0, p ? p->dl.dl_bw : 0);
if (!overflow && p) {
/*
* We reserve space for this task in the destination
* root_domain, as we can't fail after this point.
* We will free resources in the source root_domain
* later on (see set_cpus_allowed_dl()).
*/
__dl_add(dl_b, p->dl.dl_bw, dl_bw_cpus(cpu));
if (req == dl_bw_req_free) {
__dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu));
} else {
unsigned long cap = dl_bw_capacity(cpu);
overflow = __dl_overflow(dl_b, cap, 0, dl_bw);
if (req == dl_bw_req_alloc && !overflow) {
/*
* We reserve space in the destination
* root_domain, as we can't fail after this point.
* We will free resources in the source root_domain
* later on (see set_cpus_allowed_dl()).
*/
__dl_add(dl_b, dl_bw, dl_bw_cpus(cpu));
}
}
raw_spin_unlock_irqrestore(&dl_b->lock, flags);
@@ -2919,6 +2944,21 @@ int dl_cpu_busy(int cpu, struct task_struct *p)
return overflow ? -EBUSY : 0;
}
int dl_bw_check_overflow(int cpu)
{
return dl_bw_manage(dl_bw_req_check_overflow, cpu, 0);
}
int dl_bw_alloc(int cpu, u64 dl_bw)
{
return dl_bw_manage(dl_bw_req_alloc, cpu, dl_bw);
}
void dl_bw_free(int cpu, u64 dl_bw)
{
dl_bw_manage(dl_bw_req_free, cpu, dl_bw);
}
#endif
#ifdef CONFIG_SCHED_DEBUG
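
Note: the old dl_cpu_busy(cpu, p) helper served two purposes (pure overflow
check when p == NULL, check-and-reserve otherwise); the refactor splits this
into dl_bw_check_overflow(), dl_bw_alloc() and dl_bw_free() on top of one
dl_bw_manage() worker. A minimal sketch of the intended call pattern; the
caller below is illustrative only and not part of this merge:

/* Reserve DEADLINE bandwidth on a destination CPU, roll back on failure. */
static int example_move_dl_task(struct task_struct *p, int dest_cpu)
{
        int ret;

        ret = dl_bw_alloc(dest_cpu, p->dl.dl_bw); /* -EBUSY on overflow */
        if (ret)
                return ret;

        ret = example_do_migration(p, dest_cpu);  /* hypothetical helper */
        if (ret)
                dl_bw_free(dest_cpu, p->dl.dl_bw); /* release reservation */

        return ret;
}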


@@ -351,7 +351,7 @@ extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
extern bool __checkparam_dl(const struct sched_attr *attr);
extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
- extern int dl_cpu_busy(int cpu, struct task_struct *p);
+ extern int dl_bw_check_overflow(int cpu);
#ifdef CONFIG_CGROUP_SCHED


@@ -45,6 +45,11 @@ static inline void count_compact_events(enum vm_event_item item, long delta)
#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>
#undef CREATE_TRACE_POINTS
#ifndef __GENKSYMS__
#include <trace/hooks/mm.h>
#endif
#define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order))
#define pageblock_start_pfn(pfn) block_start_pfn(pfn, pageblock_order)
@@ -2084,6 +2089,7 @@ static enum compact_result __compact_finished(struct compact_control *cc)
unsigned int order;
const int migratetype = cc->migratetype;
int ret;
bool abort_compact = false;
/* Compaction run completes if the migrate and free scanner meet */
if (compact_scanners_met(cc)) {
@@ -2183,7 +2189,8 @@ static enum compact_result __compact_finished(struct compact_control *cc)
}
out:
- if (cc->contended || fatal_signal_pending(current))
+ trace_android_vh_compact_finished(&abort_compact);
+ if (cc->contended || fatal_signal_pending(current) || abort_compact)
ret = COMPACT_CONTENDED;
return ret;
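
The new trace_android_vh_compact_finished() call is an Android vendor hook: a
tracepoint-backed extension point that a vendor module attaches a probe to. A
minimal module-side sketch, assuming the hook declaration in
<trace/hooks/mm.h>; the abort policy itself is hypothetical:

#include <linux/module.h>
#include <trace/hooks/mm.h>

/* Probe signature: void *data first, then the hook's arguments. */
static void vh_compact_finished(void *data, bool *abort_compact)
{
        /* Hypothetical vendor policy, e.g. bail out under thermal pressure. */
        *abort_compact = false;
}

static int __init vh_example_init(void)
{
        return register_trace_android_vh_compact_finished(vh_compact_finished,
                                                          NULL);
}
module_init(vh_example_init);
MODULE_LICENSE("GPL");

The same register/probe pattern applies to the other android_vh hooks added in
this merge (madvise abort, readahead tuning, the page_alloc bypass hooks, and
so on).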


@@ -2983,6 +2983,8 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
ra->size = ra->ra_pages;
ra->async_size = ra->ra_pages / 4;
trace_android_vh_tune_mmap_readaround(ra->ra_pages, vmf->pgoff,
&ra->start, &ra->size, &ra->async_size);
ractl._index = ra->start;
do_page_cache_ra(&ractl, ra->size, ra->async_size);
return fpin;
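
A probe for this hook can shrink the fault-around window on low-RAM devices.
Sketch of one possible probe; the parameter types are inferred from the call
site above and struct file_ra_state, so treat the prototype as an assumption:

static void vh_tune_mmap_readaround(void *data, unsigned int ra_pages,
                                    pgoff_t pgoff, pgoff_t *start,
                                    unsigned int *size, unsigned int *async_size)
{
        /* Hypothetical policy: cap synchronous mmap readaround at 16 pages. */
        if (*size > 16) {
                *size = 16;
                *async_size = 4;
                *start = pgoff > 8 ? pgoff - 8 : 0;
        }
}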


@@ -328,8 +328,10 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
struct page *page = NULL;
LIST_HEAD(page_list);
bool allow_shared = false;
bool abort_madvise = false;
- if (fatal_signal_pending(current))
+ trace_android_vh_madvise_cold_or_pageout_abort(vma, &abort_madvise);
+ if (fatal_signal_pending(current) || abort_madvise)
return -EINTR;
trace_android_vh_madvise_cold_or_pageout(vma, &allow_shared);


@@ -1410,6 +1410,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
unsigned int order, bool check_free, fpi_t fpi_flags)
{
int bad = 0;
bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags);
bool init = want_init_on_free();
VM_BUG_ON_PAGE(PageTail(page), page);
@@ -1483,7 +1484,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
* With hardware tag-based KASAN, memory tags must be set before the
* page becomes unavailable via debug_pagealloc or arch_free_page.
*/
- if (!should_skip_kasan_poison(page, fpi_flags)) {
+ if (!skip_kasan_poison) {
kasan_poison_pages(page, order, init);
/* Memory is already initialized if KASAN did it internally. */
@@ -1761,11 +1762,15 @@ static void __free_pages_ok(struct page *page, unsigned int order,
int migratetype;
unsigned long pfn = page_to_pfn(page);
struct zone *zone = page_zone(page);
bool skip_free_unref_page = false;
if (!free_pages_prepare(page, order, true, fpi_flags))
return;
migratetype = get_pfnblock_migratetype(page, pfn);
trace_android_vh_free_unref_page_bypass(page, order, migratetype, &skip_free_unref_page);
if (skip_free_unref_page)
return;
spin_lock_irqsave(&zone->lock, flags);
if (unlikely(has_isolate_pageblock(zone) ||
@@ -2992,6 +2997,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
struct page *page;
int order;
bool ret;
bool skip_unreserve_highatomic = false;
for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
ac->nodemask) {
@@ -3003,6 +3009,11 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
pageblock_nr_pages)
continue;
trace_android_vh_unreserve_highatomic_bypass(force, zone,
&skip_unreserve_highatomic);
if (skip_unreserve_highatomic)
continue;
spin_lock_irqsave(&zone->lock, flags);
for (order = 0; order < MAX_ORDER; order++) {
struct free_area *area = &(zone->free_area[order]);
@@ -3159,6 +3170,12 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
retry:
page = __rmqueue_smallest(zone, order, migratetype);
/*
* Give normal GFP_MOVABLE allocations a chance to try MIGRATE_CMA.
*/
if (unlikely(!page) && (migratetype == MIGRATE_MOVABLE))
trace_android_vh_rmqueue_cma_fallback(zone, order, &page);
if (unlikely(!page) && __rmqueue_fallback(zone, order, migratetype,
alloc_flags))
goto retry;
@@ -3260,6 +3277,10 @@ static struct list_head *get_populated_pcp_list(struct zone *zone,
int batch = READ_ONCE(pcp->batch);
int alloced;
trace_android_vh_rmqueue_bulk_bypass(order, pcp, migratetype, list);
if (!list_empty(list))
return list;
/*
* Scale batch relative to order if batch implies
* free pages can be stored on the PCP. Batch can
@@ -3579,10 +3600,16 @@ void free_unref_page(struct page *page, unsigned int order)
unsigned long pfn = page_to_pfn(page);
int migratetype;
bool pcp_skip_cma_pages = false;
bool skip_free_unref_page = false;
if (!free_unref_page_prepare(page, pfn, order))
return;
migratetype = get_pcppage_migratetype(page);
trace_android_vh_free_unref_page_bypass(page, order, migratetype, &skip_free_unref_page);
if (skip_free_unref_page)
return;
/*
* We only track unmovable, reclaimable, movable, and CMA on pcp lists.
* Place ISOLATE pages on the isolated list because they are being
@@ -3812,6 +3839,44 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
#endif
}
#ifdef CONFIG_CMA
/*
* GFP_MOVABLE allocations can drain UNMOVABLE & RECLAIMABLE page blocks with
* CMA's help, which makes GFP_KERNEL allocations fail. Re-check
* zone_watermark_ok() without ALLOC_CMA to decide whether to use CMA first.
*/
static bool use_cma_first(struct zone *zone, unsigned int order, unsigned int alloc_flags)
{
unsigned long watermark;
bool cma_first = false;
watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
/* check if GFP_MOVABLE passed the previous zone_watermark_ok check only with CMA's help */
if (zone_watermark_ok(zone, order, watermark, 0, alloc_flags & (~ALLOC_CMA))) {
/*
* Balance movable allocations between regular and CMA areas by
* allocating from CMA when over half of the zone's free memory
* is in the CMA area.
*/
cma_first = (zone_page_state(zone, NR_FREE_CMA_PAGES) >
zone_page_state(zone, NR_FREE_PAGES) / 2);
} else {
/*
* A failed watermark check means UNMOVABLE & RECLAIMABLE pages are
* running low; use CMA first so that they stay around the
* corresponding watermark.
*/
cma_first = true;
}
return cma_first;
}
#else
static bool use_cma_first(struct zone *zone, unsigned int order, unsigned int alloc_flags)
{
return false;
}
#endif
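
Concretely, the heuristic reduces to three cases (numbers are illustrative):

/*
 * Movable request with ALLOC_CMA in a zone with 1000 free pages:
 *  - no-CMA watermark check passes, 600 free pages are CMA:
 *    600 > 1000 / 2, so cma_first = true (keep the areas balanced);
 *  - no-CMA watermark check passes, only 300 free pages are CMA:
 *    cma_first = false (regular areas have room to spare);
 *  - no-CMA watermark check fails: unmovable/reclaimable blocks are
 *    already short, so cma_first = true to spare them.
 */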
static __always_inline
struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
unsigned int order, unsigned int alloc_flags,
@@ -3835,12 +3900,26 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
trace_mm_page_alloc_zone_locked(page, order, migratetype);
}
if (!page) {
- if (alloc_flags & ALLOC_CMA && migratetype == MIGRATE_MOVABLE)
- page = __rmqueue_cma(zone, order, migratetype,
- alloc_flags);
+ /*
+ * Balance movable allocations between regular and CMA areas by
+ * allocating from CMA after re-checking zone_watermark_ok(), to see
+ * whether the earlier check only passed with CMA's help.
+ */
+ if (alloc_flags & ALLOC_CMA) {
+ bool use_cma_first_check = false;
+ bool try_cma;
+ trace_android_vh_use_cma_first_check(&use_cma_first_check);
+ try_cma = use_cma_first_check ?
+ use_cma_first(zone, order, alloc_flags) :
+ migratetype == MIGRATE_MOVABLE;
+ if (try_cma)
+ page = __rmqueue_cma(zone, order, migratetype,
+ alloc_flags);
+ }
if (!page)
page = __rmqueue(zone, order, migratetype,
alloc_flags);
}
if (!page) {
spin_unlock_irqrestore(&zone->lock, flags);
@@ -4266,6 +4345,7 @@ static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
#ifdef CONFIG_CMA
if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE && gfp_mask & __GFP_CMA)
alloc_flags |= ALLOC_CMA;
trace_android_vh_alloc_flags_cma_adjust(gfp_mask, &alloc_flags);
#endif
return alloc_flags;
}
@@ -5156,6 +5236,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
unsigned int zonelist_iter_cookie;
int reserve_flags;
unsigned long alloc_start = jiffies;
bool should_alloc_retry = false;
/*
* We also sanity check to catch abuse of atomic reserves being used by
* callers that are not in atomic context.
@@ -5294,6 +5375,11 @@ retry:
if (page)
goto got_pg;
trace_android_vh_should_alloc_pages_retry(gfp_mask, order, &alloc_flags,
ac->migratetype, ac->preferred_zoneref->zone, &page, &should_alloc_retry);
if (should_alloc_retry)
goto retry;
/* Try direct reclaim and then allocating */
page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
&did_some_progress);


@@ -463,6 +463,8 @@ static void ondemand_readahead(struct readahead_control *ractl,
if (req_size > max_pages && bdi->io_pages > max_pages)
max_pages = min(req_size, bdi->io_pages);
trace_android_vh_ra_tuning_max_page(ractl, &max_pages);
/*
* start of file
*/


@@ -646,15 +646,6 @@ EXPORT_SYMBOL_GPL(kmem_dump_obj);
#endif
#ifndef CONFIG_SLOB
- static int __init setup_android_kmalloc_64_create(char *str)
- {
- if (IS_ALIGNED(64, cache_line_size()))
- android_kmalloc_64_create = true;
- return 1;
- }
- __setup("android_kmalloc_64_create", setup_android_kmalloc_64_create);
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name,
unsigned int size, slab_flags_t flags,
@@ -663,14 +654,6 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name,
int err;
unsigned int align = ARCH_KMALLOC_MINALIGN;
- /*
- * Ensure object alignment is 64. Otherwise, it can be larger
- * (e.g. 128 with ARM64), which causes SLUB to increase the object
- * size to 128 bytes to conform with the alignment.
- */
- if (android_kmalloc_64_create && size == 64)
- align = 64;
s->name = name;
s->size = s->object_size = size;
@@ -715,6 +698,7 @@ kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init =
{ /* initialization for https://bugs.llvm.org/show_bug.cgi?id=42570 */ };
EXPORT_SYMBOL(kmalloc_caches);
/* This variable is intentionally unused. Preserved for KMI stability. */
bool android_kmalloc_64_create __ro_after_init;
EXPORT_SYMBOL(android_kmalloc_64_create);
@@ -864,10 +848,6 @@ void __init setup_kmalloc_cache_index_table(void)
size_index[elem] = KMALLOC_SHIFT_LOW;
}
- if (android_kmalloc_64_create)
- for (i = 8; i <= 64; i += 8)
- size_index[size_index_elem(i)] = 6;
if (KMALLOC_MIN_SIZE >= 64) {
/*
* The 96 byte size cache is not used if the alignment
@@ -925,10 +905,6 @@ void __init create_kmalloc_caches(slab_flags_t flags)
int i;
enum kmalloc_cache_type type;
- if (android_kmalloc_64_create)
- for (type = KMALLOC_NORMAL; type <= KMALLOC_RECLAIM; type++)
- new_kmalloc_cache(6, type, flags);
/*
* Including KMALLOC_CGROUP if CONFIG_MEMCG_KMEM defined
*/
@@ -1002,6 +978,9 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
PAGE_SIZE << order);
}
trace_android_vh_kmalloc_order_alloced(page, size, flags);
ret = kasan_kmalloc_large(ret, size, flags);
/* As ret might get tagged, call kmemleak hook after KASAN. */
kmemleak_alloc(ret, size, 1, flags);


@@ -1794,6 +1794,8 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
else
page = __alloc_pages_node(node, flags, order);
trace_android_vh_slab_page_alloced(page, s->size, flags);
return page;
}


@@ -27,8 +27,9 @@
#include <linux/uaccess.h>
#include "internal.h"
- #ifndef __GENSYMS__
+ #ifndef __GENKSYMS__
#include <trace/hooks/syscall_check.h>
#include <trace/hooks/mm.h>
#endif
/**
@@ -598,6 +599,7 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
gfp_t kmalloc_flags = flags;
void *ret;
bool use_vmalloc = false;
/*
* vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
@@ -606,6 +608,9 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
if ((flags & GFP_KERNEL) != GFP_KERNEL)
return kmalloc_node(size, flags, node);
trace_android_vh_kvmalloc_node_use_vmalloc(size, &kmalloc_flags, &use_vmalloc);
if (use_vmalloc)
goto use_vmalloc_node;
/*
* We want to attempt a large physically contiguous block first because
* it is less likely to fragment multiple larger blocks and therefore
@@ -635,6 +640,7 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
return NULL;
}
use_vmalloc_node:
return __vmalloc_node(size, 1, flags, node,
__builtin_return_address(0));
}
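
As elsewhere, a vendor probe decides when to go straight to vmalloc. A sketch;
the prototype mirrors the call site above, and the 1 MiB threshold is a
made-up policy:

static void vh_kvmalloc_node_use_vmalloc(void *data, size_t size,
                                         gfp_t *kmalloc_flags, bool *use_vmalloc)
{
        /* Hypothetical: skip the kmalloc attempt for very large buffers. */
        if (size > SZ_1M)
                *use_vmalloc = true;
}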


@@ -66,6 +66,9 @@
#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(mm_vmscan_direct_reclaim_begin);
EXPORT_TRACEPOINT_SYMBOL_GPL(mm_vmscan_direct_reclaim_end);
#undef CREATE_TRACE_POINTS
#include <trace/hooks/vmscan.h>
@@ -1943,6 +1946,25 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
}
#ifdef CONFIG_CMA
/*
* It is a waste of effort to scan and reclaim CMA pages when they are not
* available to the current allocation context. Kswapd cannot take part in
* the skipping, as its sc->gfp_mask of GFP_KERNEL cannot identify the real
* allocation context.
*/
static bool skip_cma(struct page *page, struct scan_control *sc)
{
return !current_is_kswapd() &&
gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE &&
get_pageblock_migratetype(page) == MIGRATE_CMA;
}
#else
static bool skip_cma(struct page *page, struct scan_control *sc)
{
return false;
}
#endif
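
The effect during LRU isolation (next hunk) can be summarized as:

/*
 * A GFP_KERNEL direct reclaimer cannot be satisfied from MIGRATE_CMA
 * pageblocks, so when it meets a CMA page on the LRU, skip_cma() returns
 * true and the page is moved to pages_skipped instead of being isolated.
 * Kswapd never skips: its sc->gfp_mask of GFP_KERNEL cannot identify the
 * allocation context it is reclaiming for.
 */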
/*
* Isolating page from the lruvec to fill in @dst list by nr_to_scan times.
*
@@ -1989,7 +2011,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
nr_pages = compound_nr(page);
total_scan += nr_pages;
- if (page_zonenum(page) > sc->reclaim_idx) {
+ if (page_zonenum(page) > sc->reclaim_idx ||
+ skip_cma(page, sc)) {
nr_skipped[page_zonenum(page)] += nr_pages;
move_to = &pages_skipped;
goto move;


@@ -8873,7 +8873,7 @@ static bool cfg80211_off_channel_oper_allowed(struct wireless_dev *wdev,
struct cfg80211_chan_def *chandef;
chandef = wdev_chandef(wdev, link_id);
- if (!chandef)
+ if (!chandef || !chandef->chan)
continue;
/*
@@ -10608,6 +10608,8 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
if (!info->attrs[NL80211_ATTR_MLD_ADDR])
return -EINVAL;
req.ap_mld_addr = nla_data(info->attrs[NL80211_ATTR_MLD_ADDR]);
if (!is_valid_ether_addr(req.ap_mld_addr))
return -EINVAL;
}
req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
@@ -10765,8 +10767,7 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
static struct cfg80211_bss *nl80211_assoc_bss(struct cfg80211_registered_device *rdev,
const u8 *ssid, int ssid_len,
- struct nlattr **attrs,
- const u8 **bssid_out)
+ struct nlattr **attrs)
{
struct ieee80211_channel *chan;
struct cfg80211_bss *bss;
@@ -10793,7 +10794,6 @@ static struct cfg80211_bss *nl80211_assoc_bss(struct cfg80211_registered_device
if (!bss)
return ERR_PTR(-ENOENT);
- *bssid_out = bssid;
return bss;
}
@@ -10803,7 +10803,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
struct net_device *dev = info->user_ptr[1];
struct cfg80211_assoc_request req = {};
struct nlattr **attrs = NULL;
- const u8 *bssid, *ssid;
+ const u8 *ap_addr, *ssid;
unsigned int link_id;
int err, ssid_len;
@@ -10940,6 +10940,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
req.ap_mld_addr = nla_data(info->attrs[NL80211_ATTR_MLD_ADDR]);
ap_addr = req.ap_mld_addr;
attrs = kzalloc(attrsize, GFP_KERNEL);
if (!attrs)
@@ -10965,8 +10966,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
goto free;
}
req.links[link_id].bss =
- nl80211_assoc_bss(rdev, ssid, ssid_len, attrs,
- &bssid);
+ nl80211_assoc_bss(rdev, ssid, ssid_len, attrs);
if (IS_ERR(req.links[link_id].bss)) {
err = PTR_ERR(req.links[link_id].bss);
req.links[link_id].bss = NULL;
@@ -11017,10 +11017,10 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
if (req.link_id >= 0)
return -EINVAL;
- req.bss = nl80211_assoc_bss(rdev, ssid, ssid_len, info->attrs,
- &bssid);
+ req.bss = nl80211_assoc_bss(rdev, ssid, ssid_len, info->attrs);
if (IS_ERR(req.bss))
return PTR_ERR(req.bss);
ap_addr = req.bss->bssid;
}
err = nl80211_crypto_settings(rdev, info, &req.crypto, 1);
@@ -11033,7 +11033,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
dev->ieee80211_ptr->conn_owner_nlportid =
info->snd_portid;
memcpy(dev->ieee80211_ptr->disconnect_bssid,
- bssid, ETH_ALEN);
+ ap_addr, ETH_ALEN);
}
wdev_unlock(dev->ieee80211_ptr);


@@ -2,7 +2,7 @@
/*
* Portions of this file
* Copyright(c) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018, 2021-2022 Intel Corporation
+ * Copyright (C) 2018, 2021-2023 Intel Corporation
*/
#ifndef __CFG80211_RDEV_OPS
#define __CFG80211_RDEV_OPS
@@ -1441,8 +1441,8 @@ rdev_del_intf_link(struct cfg80211_registered_device *rdev,
unsigned int link_id)
{
trace_rdev_del_intf_link(&rdev->wiphy, wdev, link_id);
- if (rdev->ops->add_intf_link)
- rdev->ops->add_intf_link(&rdev->wiphy, wdev, link_id);
+ if (rdev->ops->del_intf_link)
+ rdev->ops->del_intf_link(&rdev->wiphy, wdev, link_id);
trace_rdev_return_void(&rdev->wiphy);
}


@@ -207,52 +207,6 @@ static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
skb->mark = 0;
}
- static int xfrmi_input(struct sk_buff *skb, int nexthdr, __be32 spi,
- int encap_type, unsigned short family)
- {
- struct sec_path *sp;
- sp = skb_sec_path(skb);
- if (sp && (sp->len || sp->olen) &&
- !xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
- goto discard;
- XFRM_SPI_SKB_CB(skb)->family = family;
- if (family == AF_INET) {
- XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
- XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
- } else {
- XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
- XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
- }
- return xfrm_input(skb, nexthdr, spi, encap_type);
- discard:
- kfree_skb(skb);
- return 0;
- }
- static int xfrmi4_rcv(struct sk_buff *skb)
- {
- return xfrmi_input(skb, ip_hdr(skb)->protocol, 0, 0, AF_INET);
- }
- static int xfrmi6_rcv(struct sk_buff *skb)
- {
- return xfrmi_input(skb, skb_network_header(skb)[IP6CB(skb)->nhoff],
- 0, 0, AF_INET6);
- }
- static int xfrmi4_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
- {
- return xfrmi_input(skb, nexthdr, spi, encap_type, AF_INET);
- }
- static int xfrmi6_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
- {
- return xfrmi_input(skb, nexthdr, spi, encap_type, AF_INET6);
- }
static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
{
const struct xfrm_mode *inner_mode;
@@ -820,8 +774,8 @@ static struct pernet_operations xfrmi_net_ops = {
};
static struct xfrm6_protocol xfrmi_esp6_protocol __read_mostly = {
- .handler = xfrmi6_rcv,
- .input_handler = xfrmi6_input,
+ .handler = xfrm6_rcv,
+ .input_handler = xfrm_input,
.cb_handler = xfrmi_rcv_cb,
.err_handler = xfrmi6_err,
.priority = 10,
@@ -871,8 +825,8 @@ static struct xfrm6_tunnel xfrmi_ip6ip_handler __read_mostly = {
#endif
static struct xfrm4_protocol xfrmi_esp4_protocol __read_mostly = {
- .handler = xfrmi4_rcv,
- .input_handler = xfrmi4_input,
+ .handler = xfrm4_rcv,
+ .input_handler = xfrm_input,
.cb_handler = xfrmi_rcv_cb,
.err_handler = xfrmi4_err,
.priority = 10,


@@ -172,6 +172,19 @@ unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
}
EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave);
unsigned long _snd_pcm_stream_lock_irqsave_nested(struct snd_pcm_substream *substream)
{
unsigned long flags = 0;
if (substream->pcm->nonatomic)
mutex_lock_nested(&substream->self_group.mutex,
SINGLE_DEPTH_NESTING);
else
spin_lock_irqsave_nested(&substream->self_group.lock, flags,
SINGLE_DEPTH_NESTING);
return flags;
}
EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave_nested);
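
The nested variant exists for DPCM, where a BE stream lock must be taken while
the FE stream lock is already held; it pairs with the ordinary unlock. A
sketch, assuming the matching snd_pcm_stream_lock_irqsave_nested() wrapper
macro lands in <sound/pcm.h> as part of this change:

static void example_touch_both(struct snd_pcm_substream *fe_substream,
                               struct snd_pcm_substream *be_substream)
{
        unsigned long flags;

        snd_pcm_stream_lock_irq(fe_substream);                   /* outer, FE */
        snd_pcm_stream_lock_irqsave_nested(be_substream, flags); /* inner, BE */
        /* ... operate on both runtimes without a lockdep splat ... */
        snd_pcm_stream_unlock_irqrestore(be_substream, flags);
        snd_pcm_stream_unlock_irq(fe_substream);
}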
/**
* snd_pcm_stream_unlock_irqrestore - Unlock the PCM stream
* @substream: PCM substream


@@ -2339,7 +2339,7 @@ int snd_soc_register_card(struct snd_soc_card *card)
mutex_init(&card->mutex);
mutex_init(&card->dapm_mutex);
mutex_init(&card->pcm_mutex);
- spin_lock_init(&card->dpcm_lock);
+ spin_lock_init(&card->unused);
return snd_soc_bind_card(card);
}


@@ -27,6 +27,37 @@
#include <sound/soc-link.h>
#include <sound/initval.h>
static inline void snd_soc_dpcm_mutex_lock(struct snd_soc_pcm_runtime *rtd)
{
mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
}
static inline void snd_soc_dpcm_mutex_unlock(struct snd_soc_pcm_runtime *rtd)
{
mutex_unlock(&rtd->card->pcm_mutex);
}
#define snd_soc_dpcm_mutex_assert_held(rtd) \
lockdep_assert_held(&(rtd)->card->pcm_mutex)
static inline void snd_soc_dpcm_stream_lock_irq(struct snd_soc_pcm_runtime *rtd,
int stream)
{
snd_pcm_stream_lock_irq(snd_soc_dpcm_get_substream(rtd, stream));
}
#define snd_soc_dpcm_stream_lock_irqsave_nested(rtd, stream, flags) \
snd_pcm_stream_lock_irqsave_nested(snd_soc_dpcm_get_substream(rtd, stream), flags)
static inline void snd_soc_dpcm_stream_unlock_irq(struct snd_soc_pcm_runtime *rtd,
int stream)
{
snd_pcm_stream_unlock_irq(snd_soc_dpcm_get_substream(rtd, stream));
}
#define snd_soc_dpcm_stream_unlock_irqrestore(rtd, stream, flags) \
snd_pcm_stream_unlock_irqrestore(snd_soc_dpcm_get_substream(rtd, stream), flags)
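
Together these helpers encode the new two-level scheme: card->pcm_mutex
serializes whole DPCM reconfigurations, while the FE stream lock protects the
BE lists that used to hide behind card->dpcm_lock. An illustrative walk, not
taken from the diff:

static void example_list_bes(struct snd_soc_pcm_runtime *fe, int stream)
{
        struct snd_soc_dpcm *dpcm;

        snd_soc_dpcm_mutex_lock(fe);              /* card level: serialize updates */
        snd_soc_dpcm_stream_lock_irq(fe, stream); /* stream level: guard BE list */

        for_each_dpcm_be(fe, stream, dpcm)
                dev_dbg(fe->dev, "BE %s\n", dpcm->be->dai_link->name);

        snd_soc_dpcm_stream_unlock_irq(fe, stream);
        snd_soc_dpcm_mutex_unlock(fe);
}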
#define DPCM_MAX_BE_USERS 8
static inline const char *soc_cpu_dai_name(struct snd_soc_pcm_runtime *rtd)
@@ -73,7 +104,6 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
struct snd_pcm_hw_params *params = &fe->dpcm[stream].hw_params;
struct snd_soc_dpcm *dpcm;
ssize_t offset = 0;
- unsigned long flags;
/* FE state */
offset += scnprintf(buf + offset, size - offset,
@@ -101,7 +131,6 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
goto out;
}
- spin_lock_irqsave(&fe->card->dpcm_lock, flags);
for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
params = &dpcm->hw_params;
@@ -122,7 +151,6 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
params_channels(params),
params_rate(params));
}
- spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
out:
return offset;
}
@@ -145,11 +173,13 @@ static ssize_t dpcm_state_read_file(struct file *file, char __user *user_buf,
if (!buf)
return -ENOMEM;
snd_soc_dpcm_mutex_lock(fe);
for_each_pcm_streams(stream)
if (snd_soc_dai_stream_valid(asoc_rtd_to_cpu(fe, 0), stream))
offset += dpcm_show_state(fe, stream,
buf + offset,
out_count - offset);
snd_soc_dpcm_mutex_unlock(fe);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, offset);
@@ -221,14 +251,14 @@ static void dpcm_set_fe_update_state(struct snd_soc_pcm_runtime *fe,
struct snd_pcm_substream *substream =
snd_soc_dpcm_get_substream(fe, stream);
- snd_pcm_stream_lock_irq(substream);
+ snd_soc_dpcm_stream_lock_irq(fe, stream);
if (state == SND_SOC_DPCM_UPDATE_NO && fe->dpcm[stream].trigger_pending) {
dpcm_fe_dai_do_trigger(substream,
fe->dpcm[stream].trigger_pending - 1);
fe->dpcm[stream].trigger_pending = 0;
}
fe->dpcm[stream].runtime_update = state;
- snd_pcm_stream_unlock_irq(substream);
+ snd_soc_dpcm_stream_unlock_irq(fe, stream);
}
static void dpcm_set_be_update_state(struct snd_soc_pcm_runtime *be,
@@ -256,7 +286,7 @@ void snd_soc_runtime_action(struct snd_soc_pcm_runtime *rtd,
struct snd_soc_dai *dai;
int i;
- lockdep_assert_held(&rtd->card->pcm_mutex);
+ snd_soc_dpcm_mutex_assert_held(rtd);
for_each_rtd_dais(rtd, i, dai)
snd_soc_dai_action(dai, stream, action);
@@ -309,6 +339,8 @@ int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir,
{
struct snd_soc_dpcm *dpcm;
snd_soc_dpcm_mutex_assert_held(fe);
for_each_dpcm_be(fe, dir, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
@@ -646,14 +678,14 @@ static int soc_pcm_components_close(struct snd_pcm_substream *substream,
return ret;
}
- static int soc_pcm_clean(struct snd_pcm_substream *substream, int rollback)
+ static int soc_pcm_clean(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_substream *substream, int rollback)
{
- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_component *component;
struct snd_soc_dai *dai;
int i;
- mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
+ snd_soc_dpcm_mutex_assert_held(rtd);
if (!rollback)
snd_soc_runtime_deactivate(rtd, substream->stream);
@@ -665,9 +697,6 @@ static int soc_pcm_clean(struct snd_pcm_substream *substream, int rollback)
soc_pcm_components_close(substream, rollback);
- mutex_unlock(&rtd->card->pcm_mutex);
snd_soc_pcm_component_pm_runtime_put(rtd, substream, rollback);
for_each_rtd_components(rtd, i, component)
@@ -682,9 +711,21 @@ static int soc_pcm_clean(struct snd_pcm_substream *substream, int rollback)
* freed here. The cpu DAI, codec DAI, machine and components are also
* shutdown.
*/
static int __soc_pcm_close(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_substream *substream)
{
return soc_pcm_clean(rtd, substream, 0);
}
/* PCM close ops for non-DPCM streams */
static int soc_pcm_close(struct snd_pcm_substream *substream)
{
- return soc_pcm_clean(substream, 0);
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ snd_soc_dpcm_mutex_lock(rtd);
+ __soc_pcm_close(rtd, substream);
+ snd_soc_dpcm_mutex_unlock(rtd);
+ return 0;
}
static int soc_hw_sanity_check(struct snd_pcm_substream *substream)
@@ -730,21 +771,21 @@ config_err:
* then initialized and any private data can be allocated. This also calls
* startup for the cpu DAI, component, machine and codec DAI.
*/
- static int soc_pcm_open(struct snd_pcm_substream *substream)
+ static int __soc_pcm_open(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_substream *substream)
{
- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_component *component;
struct snd_soc_dai *dai;
int i, ret = 0;
snd_soc_dpcm_mutex_assert_held(rtd);
for_each_rtd_components(rtd, i, component)
pinctrl_pm_select_default_state(component->dev);
ret = snd_soc_pcm_component_pm_runtime_get(rtd, substream);
if (ret < 0)
- goto pm_err;
- mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
+ goto err;
ret = soc_pcm_components_open(substream);
if (ret < 0)
@@ -786,16 +827,26 @@ dynamic:
snd_soc_runtime_activate(rtd, substream->stream);
ret = 0;
err:
- mutex_unlock(&rtd->card->pcm_mutex);
- pm_err:
if (ret < 0) {
- soc_pcm_clean(substream, 1);
+ soc_pcm_clean(rtd, substream, 1);
dev_err(rtd->dev, "%s() failed (%d)", __func__, ret);
}
return ret;
}
/* PCM open ops for non-DPCM streams */
static int soc_pcm_open(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
int ret;
snd_soc_dpcm_mutex_lock(rtd);
ret = __soc_pcm_open(rtd, substream);
snd_soc_dpcm_mutex_unlock(rtd);
return ret;
}
static void codec2codec_close_delayed_work(struct snd_soc_pcm_runtime *rtd)
{
/*
@@ -811,13 +862,13 @@ static void codec2codec_close_delayed_work(struct snd_soc_pcm_runtime *rtd)
* rate, etc. This function is non atomic and can be called multiple times,
* it can refer to the runtime info.
*/
- static int soc_pcm_prepare(struct snd_pcm_substream *substream)
+ static int __soc_pcm_prepare(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_substream *substream)
{
- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_dai *dai;
int i, ret = 0;
- mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
+ snd_soc_dpcm_mutex_assert_held(rtd);
ret = snd_soc_link_prepare(substream);
if (ret < 0)
@@ -845,14 +896,24 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
snd_soc_dai_digital_mute(dai, 0, substream->stream);
out:
- mutex_unlock(&rtd->card->pcm_mutex);
if (ret < 0)
dev_err(rtd->dev, "ASoC: %s() failed (%d)\n", __func__, ret);
return ret;
}
/* PCM prepare ops for non-DPCM streams */
static int soc_pcm_prepare(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
int ret;
snd_soc_dpcm_mutex_lock(rtd);
ret = __soc_pcm_prepare(rtd, substream);
snd_soc_dpcm_mutex_unlock(rtd);
return ret;
}
static void soc_pcm_codec_params_fixup(struct snd_pcm_hw_params *params,
unsigned int mask)
{
@@ -864,13 +925,13 @@ static void soc_pcm_codec_params_fixup(struct snd_pcm_hw_params *params,
interval->max = channels;
}
- static int soc_pcm_hw_clean(struct snd_pcm_substream *substream, int rollback)
+ static int soc_pcm_hw_clean(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_substream *substream, int rollback)
{
- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_dai *dai;
int i;
- mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
+ snd_soc_dpcm_mutex_assert_held(rtd);
/* clear the corresponding DAIs parameters when going to be inactive */
for_each_rtd_dais(rtd, i, dai) {
@@ -900,16 +961,28 @@ static int soc_pcm_hw_clean(struct snd_pcm_substream *substream, int rollback)
snd_soc_dai_hw_free(dai, substream, rollback);
}
- mutex_unlock(&rtd->card->pcm_mutex);
return 0;
}
/*
* Frees resources allocated by hw_params, can be called multiple times
*/
static int __soc_pcm_hw_free(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_substream *substream)
{
return soc_pcm_hw_clean(rtd, substream, 0);
}
/* hw_free PCM ops for non-DPCM streams */
static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
{
- return soc_pcm_hw_clean(substream, 0);
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ int ret;
+ snd_soc_dpcm_mutex_lock(rtd);
+ ret = __soc_pcm_hw_free(rtd, substream);
+ snd_soc_dpcm_mutex_unlock(rtd);
+ return ret;
}
/*
@@ -917,15 +990,15 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
* function can also be called multiple times and can allocate buffers
* (using snd_pcm_lib_* ). It's non-atomic.
*/
- static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+ static int __soc_pcm_hw_params(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_dai *cpu_dai;
struct snd_soc_dai *codec_dai;
int i, ret = 0;
- mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
+ snd_soc_dpcm_mutex_assert_held(rtd);
ret = soc_pcm_params_symmetry(substream, params);
if (ret)
@@ -997,16 +1070,27 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
ret = snd_soc_pcm_component_hw_params(substream, params);
out:
- mutex_unlock(&rtd->card->pcm_mutex);
if (ret < 0) {
- soc_pcm_hw_clean(substream, 1);
+ soc_pcm_hw_clean(rtd, substream, 1);
dev_err(rtd->dev, "ASoC: %s() failed (%d)\n", __func__, ret);
}
return ret;
}
/* hw_params PCM ops for non-DPCM streams */
static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
int ret;
snd_soc_dpcm_mutex_lock(rtd);
ret = __soc_pcm_hw_params(rtd, substream, params);
snd_soc_dpcm_mutex_unlock(rtd);
return ret;
}
static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
@@ -1126,7 +1210,8 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
struct snd_pcm_substream *fe_substream;
struct snd_pcm_substream *be_substream;
struct snd_soc_dpcm *dpcm;
- unsigned long flags;
+ snd_soc_dpcm_mutex_assert_held(fe);
/* only add new dpcms */
for_each_dpcm_be(fe, stream, dpcm) {
@@ -1156,10 +1241,10 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
dpcm->fe = fe;
be->dpcm[stream].runtime = fe->dpcm[stream].runtime;
dpcm->state = SND_SOC_DPCM_LINK_STATE_NEW;
- spin_lock_irqsave(&fe->card->dpcm_lock, flags);
+ snd_soc_dpcm_stream_lock_irq(fe, stream);
list_add(&dpcm->list_be, &fe->dpcm[stream].be_clients);
list_add(&dpcm->list_fe, &be->dpcm[stream].fe_clients);
- spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
+ snd_soc_dpcm_stream_unlock_irq(fe, stream);
dev_dbg(fe->dev, "connected new DPCM %s path %s %s %s\n",
stream ? "capture" : "playback", fe->dai_link->name,
@@ -1204,8 +1289,11 @@ static void dpcm_be_reparent(struct snd_soc_pcm_runtime *fe,
void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream)
{
struct snd_soc_dpcm *dpcm, *d;
- unsigned long flags;
+ LIST_HEAD(deleted_dpcms);
+ snd_soc_dpcm_mutex_assert_held(fe);
+ snd_soc_dpcm_stream_lock_irq(fe, stream);
for_each_dpcm_be_safe(fe, stream, dpcm, d) {
dev_dbg(fe->dev, "ASoC: BE %s disconnect check for %s\n",
stream ? "capture" : "playback",
@@ -1221,12 +1309,16 @@ void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream)
/* BEs still alive need new FE */
dpcm_be_reparent(fe, dpcm->be, stream);
dpcm_remove_debugfs_state(dpcm);
- spin_lock_irqsave(&fe->card->dpcm_lock, flags);
list_del(&dpcm->list_be);
+ list_move(&dpcm->list_fe, &deleted_dpcms);
}
snd_soc_dpcm_stream_unlock_irq(fe, stream);
while (!list_empty(&deleted_dpcms)) {
dpcm = list_first_entry(&deleted_dpcms, struct snd_soc_dpcm,
list_fe);
list_del(&dpcm->list_fe);
- spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
dpcm_remove_debugfs_state(dpcm);
kfree(dpcm);
}
}
@@ -1445,12 +1537,9 @@ int dpcm_process_paths(struct snd_soc_pcm_runtime *fe,
void dpcm_clear_pending_state(struct snd_soc_pcm_runtime *fe, int stream)
{
struct snd_soc_dpcm *dpcm;
- unsigned long flags;
- spin_lock_irqsave(&fe->card->dpcm_lock, flags);
for_each_dpcm_be(fe, stream, dpcm)
dpcm_set_be_update_state(dpcm->be, stream, SND_SOC_DPCM_UPDATE_NO);
- spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
}
void dpcm_be_dai_stop(struct snd_soc_pcm_runtime *fe, int stream,
@@ -1486,12 +1575,12 @@ void dpcm_be_dai_stop(struct snd_soc_pcm_runtime *fe, int stream,
continue;
if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) {
- soc_pcm_hw_free(be_substream);
+ __soc_pcm_hw_free(be, be_substream);
be->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_FREE;
}
}
- soc_pcm_close(be_substream);
+ __soc_pcm_close(be, be_substream);
be_substream->runtime = NULL;
be->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE;
}
@@ -1539,7 +1628,7 @@ int dpcm_be_dai_startup(struct snd_soc_pcm_runtime *fe, int stream)
stream ? "capture" : "playback", be->dai_link->name);
be_substream->runtime = be->dpcm[stream].runtime;
- err = soc_pcm_open(be_substream);
+ err = __soc_pcm_open(be, be_substream);
if (err < 0) {
be->dpcm[stream].users--;
if (be->dpcm[stream].users < 0)
@@ -1550,7 +1639,7 @@ int dpcm_be_dai_startup(struct snd_soc_pcm_runtime *fe, int stream)
be->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE;
goto unwind;
}
be->dpcm_be_start[stream] = 0;
be->dpcm[stream].state = SND_SOC_DPCM_STATE_OPEN;
count++;
}
@@ -1787,7 +1876,7 @@ static int dpcm_fe_dai_startup(struct snd_pcm_substream *fe_substream)
dev_dbg(fe->dev, "ASoC: open FE %s\n", fe->dai_link->name);
/* start the DAI frontend */
- ret = soc_pcm_open(fe_substream);
+ ret = __soc_pcm_open(fe, fe_substream);
if (ret < 0)
goto unwind;
@@ -1818,6 +1907,8 @@ static int dpcm_fe_dai_shutdown(struct snd_pcm_substream *substream)
struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream);
int stream = substream->stream;
snd_soc_dpcm_mutex_assert_held(fe);
dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
/* shutdown the BEs */
@@ -1826,7 +1917,7 @@ static int dpcm_fe_dai_shutdown(struct snd_pcm_substream *substream)
dev_dbg(fe->dev, "ASoC: close FE %s\n", fe->dai_link->name);
/* now shutdown the frontend */
- soc_pcm_close(substream);
+ __soc_pcm_close(fe, substream);
/* run the stream stop event */
dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);
@@ -1871,7 +1962,7 @@ void dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream)
dev_dbg(be->dev, "ASoC: hw_free BE %s\n",
be->dai_link->name);
- soc_pcm_hw_free(be_substream);
+ __soc_pcm_hw_free(be, be_substream);
be->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_FREE;
}
@@ -1882,13 +1973,13 @@ static int dpcm_fe_dai_hw_free(struct snd_pcm_substream *substream)
struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream);
int stream = substream->stream;
- mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+ snd_soc_dpcm_mutex_lock(fe);
dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
dev_dbg(fe->dev, "ASoC: hw_free FE %s\n", fe->dai_link->name);
/* call hw_free on the frontend */
- soc_pcm_hw_free(substream);
+ soc_pcm_hw_clean(fe, substream, 0);
/* only hw_params backends that are either sinks or sources
* to this frontend DAI */
@@ -1897,7 +1988,7 @@ static int dpcm_fe_dai_hw_free(struct snd_pcm_substream *substream)
fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_FREE;
dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
- mutex_unlock(&fe->card->mutex);
+ snd_soc_dpcm_mutex_unlock(fe);
return 0;
}
@@ -1941,7 +2032,7 @@ int dpcm_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int stream)
dev_dbg(be->dev, "ASoC: hw_params BE %s\n",
be->dai_link->name);
- ret = soc_pcm_hw_params(be_substream, &dpcm->hw_params);
+ ret = __soc_pcm_hw_params(be, be_substream, &dpcm->hw_params);
if (ret < 0)
goto unwind;
@@ -1971,7 +2062,7 @@ unwind:
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
continue;
- soc_pcm_hw_free(be_substream);
+ __soc_pcm_hw_free(be, be_substream);
}
return ret;
@@ -1983,7 +2074,7 @@ static int dpcm_fe_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream);
int ret, stream = substream->stream;
- mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+ snd_soc_dpcm_mutex_lock(fe);
dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
memcpy(&fe->dpcm[stream].hw_params, params,
@@ -1997,7 +2088,7 @@ static int dpcm_fe_dai_hw_params(struct snd_pcm_substream *substream,
params_channels(params), params_format(params));
/* call hw_params on the frontend */
- ret = soc_pcm_hw_params(substream, params);
+ ret = __soc_pcm_hw_params(fe, substream, params);
if (ret < 0)
dpcm_be_dai_hw_free(fe, stream);
else
@@ -2005,7 +2096,7 @@ static int dpcm_fe_dai_hw_params(struct snd_pcm_substream *substream,
out:
dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
- mutex_unlock(&fe->card->mutex);
+ snd_soc_dpcm_mutex_unlock(fe);
if (ret < 0)
dev_err(fe->dev, "ASoC: %s failed (%d)\n", __func__, ret);
@@ -2018,6 +2109,7 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
{
struct snd_soc_pcm_runtime *be;
struct snd_soc_dpcm *dpcm;
unsigned long flags;
int ret = 0;
for_each_dpcm_be(fe, stream, dpcm) {
@@ -2026,89 +2118,128 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
be = dpcm->be;
be_substream = snd_soc_dpcm_get_substream(be, stream);
snd_soc_dpcm_stream_lock_irqsave_nested(be, stream, flags);
/* is this op for this BE ? */
if (!snd_soc_dpcm_be_can_update(fe, be, stream))
- continue;
+ goto next;
dev_dbg(be->dev, "ASoC: trigger BE %s cmd %d\n",
be->dai_link->name, cmd);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
+ if (!be->dpcm_be_start[stream] &&
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED))
- continue;
+ goto next;
+ be->dpcm_be_start[stream]++;
+ if (be->dpcm_be_start[stream] != 1)
+ goto next;
ret = soc_pcm_trigger(be_substream, cmd);
- if (ret)
- goto end;
+ if (ret) {
+ be->dpcm_be_start[stream]--;
+ goto next;
+ }
be->dpcm[stream].state = SND_SOC_DPCM_STATE_START;
break;
case SNDRV_PCM_TRIGGER_RESUME:
if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND))
- continue;
+ goto next;
+ be->dpcm_be_start[stream]++;
+ if (be->dpcm_be_start[stream] != 1)
+ goto next;
ret = soc_pcm_trigger(be_substream, cmd);
- if (ret)
- goto end;
+ if (ret) {
+ be->dpcm_be_start[stream]--;
+ goto next;
+ }
be->dpcm[stream].state = SND_SOC_DPCM_STATE_START;
break;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED))
- continue;
+ if (!be->dpcm_be_start[stream] &&
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START) &&
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED))
+ goto next;
+ be->dpcm_be_start[stream]++;
+ if (be->dpcm_be_start[stream] != 1)
+ goto next;
ret = soc_pcm_trigger(be_substream, cmd);
- if (ret)
- goto end;
+ if (ret) {
+ be->dpcm_be_start[stream]--;
+ goto next;
+ }
be->dpcm[stream].state = SND_SOC_DPCM_STATE_START;
break;
case SNDRV_PCM_TRIGGER_STOP:
if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_START) &&
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED))
- continue;
+ goto next;
- if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream))
- continue;
+ if (be->dpcm[stream].state == SND_SOC_DPCM_STATE_START)
+ be->dpcm_be_start[stream]--;
+ if (be->dpcm_be_start[stream] != 0)
+ goto next;
ret = soc_pcm_trigger(be_substream, cmd);
- if (ret)
- goto end;
+ if (ret) {
+ if (be->dpcm[stream].state == SND_SOC_DPCM_STATE_START)
+ be->dpcm_be_start[stream]++;
+ goto next;
+ }
be->dpcm[stream].state = SND_SOC_DPCM_STATE_STOP;
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START)
- continue;
+ goto next;
- if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream))
- continue;
+ be->dpcm_be_start[stream]--;
+ if (be->dpcm_be_start[stream] != 0)
+ goto next;
ret = soc_pcm_trigger(be_substream, cmd);
- if (ret)
- goto end;
+ if (ret) {
+ be->dpcm_be_start[stream]++;
+ goto next;
+ }
be->dpcm[stream].state = SND_SOC_DPCM_STATE_SUSPEND;
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START)
- continue;
+ goto next;
- if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream))
- continue;
+ be->dpcm_be_start[stream]--;
+ if (be->dpcm_be_start[stream] != 0)
+ goto next;
ret = soc_pcm_trigger(be_substream, cmd);
- if (ret)
- goto end;
+ if (ret) {
+ be->dpcm_be_start[stream]++;
+ goto next;
+ }
be->dpcm[stream].state = SND_SOC_DPCM_STATE_PAUSED;
break;
}
next:
snd_soc_dpcm_stream_unlock_irqrestore(be, stream, flags);
if (ret)
break;
}
- end:
if (ret < 0)
dev_err(fe->dev, "ASoC: %s() failed at %s (%d)\n",
__func__, be->dai_link->name, ret);
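
The dpcm_be_start counter makes BE triggering idempotent across several FEs
sharing one BE; only the first START and the last STOP reach soc_pcm_trigger().
A worked sequence with two FEs:

/*
 * FE1 START: dpcm_be_start 0 -> 1, soc_pcm_trigger(START) runs
 * FE2 START: dpcm_be_start 1 -> 2, trigger skipped (goto next)
 * FE2 STOP : dpcm_be_start 2 -> 1, trigger skipped (goto next)
 * FE1 STOP : dpcm_be_start 1 -> 0, soc_pcm_trigger(STOP) runs
 *
 * On a trigger failure the counter is wound back, so a later retry still
 * sees a consistent refcount.
 */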
@@ -2279,7 +2410,7 @@ int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
dev_dbg(be->dev, "ASoC: prepare BE %s\n",
be->dai_link->name);
- ret = soc_pcm_prepare(be_substream);
+ ret = __soc_pcm_prepare(be, be_substream);
if (ret < 0)
break;
@@ -2297,7 +2428,7 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream)
struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream);
int stream = substream->stream, ret = 0;
- mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+ snd_soc_dpcm_mutex_lock(fe);
dev_dbg(fe->dev, "ASoC: prepare FE %s\n", fe->dai_link->name);
@@ -2316,7 +2447,7 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream)
goto out;
/* call prepare on the frontend */
- ret = soc_pcm_prepare(substream);
+ ret = __soc_pcm_prepare(fe, substream);
if (ret < 0)
goto out;
@@ -2324,7 +2455,7 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream)
out:
dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
- mutex_unlock(&fe->card->mutex);
+ snd_soc_dpcm_mutex_unlock(fe);
if (ret < 0)
dev_err(fe->dev, "ASoC: %s() failed (%d)\n", __func__, ret);
@@ -2375,7 +2506,6 @@ static int dpcm_run_update_startup(struct snd_soc_pcm_runtime *fe, int stream)
struct snd_soc_dpcm *dpcm;
enum snd_soc_dpcm_trigger trigger = fe->dai_link->trigger[stream];
int ret = 0;
- unsigned long flags;
dev_dbg(fe->dev, "ASoC: runtime %s open on FE %s\n",
stream ? "capture" : "playback", fe->dai_link->name);
@@ -2444,7 +2574,6 @@ close:
dpcm_be_dai_shutdown(fe, stream);
disconnect:
/* disconnect any pending BEs */
- spin_lock_irqsave(&fe->card->dpcm_lock, flags);
for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
@@ -2456,7 +2585,6 @@ disconnect:
be->dpcm[stream].state == SND_SOC_DPCM_STATE_NEW)
dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
}
- spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
if (ret < 0)
dev_err(fe->dev, "ASoC: %s() failed (%d)\n", __func__, ret);
@@ -2531,7 +2659,7 @@ int snd_soc_dpcm_runtime_update(struct snd_soc_card *card)
struct snd_soc_pcm_runtime *fe;
int ret = 0;
- mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+ mutex_lock_nested(&card->pcm_mutex, card->pcm_subclass);
/* shutdown all old paths first */
for_each_card_rtds(card, fe) {
ret = soc_dpcm_fe_runtime_update(fe, 0);
@@ -2547,7 +2675,7 @@ int snd_soc_dpcm_runtime_update(struct snd_soc_card *card)
}
out:
- mutex_unlock(&card->mutex);
+ mutex_unlock(&card->pcm_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dpcm_runtime_update);
@@ -2558,6 +2686,8 @@ static void dpcm_fe_dai_cleanup(struct snd_pcm_substream *fe_substream)
struct snd_soc_dpcm *dpcm;
int stream = fe_substream->stream;
snd_soc_dpcm_mutex_assert_held(fe);
/* mark FE's links ready to prune */
for_each_dpcm_be(fe, stream, dpcm)
dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
@@ -2572,12 +2702,12 @@ static int dpcm_fe_dai_close(struct snd_pcm_substream *fe_substream)
struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(fe_substream);
int ret;
- mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+ snd_soc_dpcm_mutex_lock(fe);
ret = dpcm_fe_dai_shutdown(fe_substream);
dpcm_fe_dai_cleanup(fe_substream);
- mutex_unlock(&fe->card->mutex);
+ snd_soc_dpcm_mutex_unlock(fe);
return ret;
}
@@ -2588,7 +2718,7 @@ static int dpcm_fe_dai_open(struct snd_pcm_substream *fe_substream)
int ret;
int stream = fe_substream->stream;
- mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+ snd_soc_dpcm_mutex_lock(fe);
fe->dpcm[stream].runtime = fe_substream->runtime;
ret = dpcm_path_get(fe, stream, &list);
@@ -2605,7 +2735,7 @@ static int dpcm_fe_dai_open(struct snd_pcm_substream *fe_substream)
dpcm_clear_pending_state(fe, stream);
dpcm_path_put(&list);
open_end:
- mutex_unlock(&fe->card->mutex);
+ snd_soc_dpcm_mutex_unlock(fe);
return ret;
}
@@ -2866,10 +2996,8 @@ static int snd_soc_dpcm_check_state(struct snd_soc_pcm_runtime *fe,
struct snd_soc_dpcm *dpcm;
int state;
int ret = 1;
- unsigned long flags;
int i;
- spin_lock_irqsave(&fe->card->dpcm_lock, flags);
for_each_dpcm_fe(be, stream, dpcm) {
if (dpcm->fe == fe)
@@ -2883,7 +3011,6 @@ static int snd_soc_dpcm_check_state(struct snd_soc_pcm_runtime *fe,
}
}
}
- spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
/* it's safe to do this BE DAI */
return ret;