From b651124b92285fa644ca3aefe946f0d779b093e6 Mon Sep 17 00:00:00 2001 From: Pranay Varma Kopanati Date: Mon, 17 Jun 2024 13:28:38 +0530 Subject: [PATCH 01/18] msm: eva: Adding kref count for cvp_get_inst_from_id Adding count for instance Change-Id: I4505feb478c1c682ecf6a790d7cb804f70e50a1c Signed-off-by: Pranay Varma Kopanati --- drivers/media/platform/msm/cvp/hfi_response_handler.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/media/platform/msm/cvp/hfi_response_handler.c b/drivers/media/platform/msm/cvp/hfi_response_handler.c index db857c210f3f..e9d61e382612 100644 --- a/drivers/media/platform/msm/cvp/hfi_response_handler.c +++ b/drivers/media/platform/msm/cvp/hfi_response_handler.c @@ -467,7 +467,7 @@ retry: } } - inst = match ? inst : NULL; + inst = match && kref_get_unless_zero(&inst->kref) ? inst : NULL; mutex_unlock(&core->lock); } else { if (core->state == CVP_CORE_UNINIT) @@ -519,7 +519,7 @@ static int hfi_process_session_cvp_msg(u32 device_id, sess_msg = kmem_cache_alloc(cvp_driver->msg_cache, GFP_KERNEL); if (sess_msg == NULL) { dprintk(CVP_ERR, "%s runs out msg cache memory\n", __func__); - return -ENOMEM; + goto error_no_mem; } memcpy(&sess_msg->pkt, pkt, get_msg_size(pkt)); @@ -542,11 +542,14 @@ static int hfi_process_session_cvp_msg(u32 device_id, info->response_type = HAL_NO_RESP; + cvp_put_inst(inst); return 0; error_handle_msg: spin_unlock(&sq->lock); kmem_cache_free(cvp_driver->msg_cache, sess_msg); +error_no_mem: + cvp_put_inst(inst); return -ENOMEM; } From bbf350b17508157c2d533cb85722bc064da7414c Mon Sep 17 00:00:00 2001 From: Pallavi Singh Date: Mon, 17 Jun 2024 23:16:56 +0530 Subject: [PATCH 02/18] msm: ep_pcie: Avoid writing req_L1_exit during dstate change Avoid setting this bit to make sure device stays in L1SS when D-state is changes to D3 Hot. Because of device not staying in L1SS the PCIe link was going through recovery all the time power consumption was higher than expected value. 
Change-Id: Id06996745171e62d3a9dbc499c693f8a9870b2ea Signed-off-by: Pallavi Singh --- drivers/platform/msm/ep_pcie/ep_pcie_core.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_core.c b/drivers/platform/msm/ep_pcie/ep_pcie_core.c index dcbee0bd18f7..d0b8e3f386a3 100644 --- a/drivers/platform/msm/ep_pcie/ep_pcie_core.c +++ b/drivers/platform/msm/ep_pcie/ep_pcie_core.c @@ -2352,7 +2352,6 @@ static irqreturn_t ep_pcie_handle_dstate_change_irq(int irq, void *data) EP_PCIE_DBG(dev, "PCIe V%d: No. %ld change to D3 state\n", dev->rev, dev->d3_counter); - ep_pcie_write_mask(dev->parf + PCIE20_PARF_PM_CTRL, 0, BIT(1)); dev->pme_en_d3 = readl_relaxed(dev->dm_core + PCIE20_CON_STATUS) & PCIE20_MASK_PME_EN; @@ -2370,13 +2369,6 @@ static irqreturn_t ep_pcie_handle_dstate_change_irq(int irq, void *data) } else if (dstate == 0) { dev->l23_ready = false; dev->d0_counter++; - /* - * When device is trasistion back to D0 from D3hot - * (without D3cold), REQ_EXIT_L1 bit won't get cleared. - * And L1 would get blocked till next D3cold. - * So clear it explicitly during D0. - */ - ep_pcie_write_mask(dev->parf + PCIE20_PARF_PM_CTRL, BIT(1), 0); atomic_set(&dev->host_wake_pending, 0); EP_PCIE_DBG(dev, From 143f500168a79dfe3f1323d736e89828f7a1e6b0 Mon Sep 17 00:00:00 2001 From: ptak Date: Thu, 11 Jul 2024 12:25:03 +0530 Subject: [PATCH 03/18] msm: cvp: OOB write fix due to integer underflow If FW send a pkt->size which is less than the sizeof packet structure then pkt->size - sizeof() would result into an integer underflow. Due to this the subsequent check would be bypassed and we will start write to an OOB memory. 
Change-Id: Icb3e4e6d64275592ceb6f747de653dcc1c65fec7 Signed-off-by: ptak --- drivers/media/platform/msm/cvp/hfi_response_handler.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/media/platform/msm/cvp/hfi_response_handler.c b/drivers/media/platform/msm/cvp/hfi_response_handler.c index db857c210f3f..04c4c201fefa 100644 --- a/drivers/media/platform/msm/cvp/hfi_response_handler.c +++ b/drivers/media/platform/msm/cvp/hfi_response_handler.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -559,7 +560,7 @@ static void hfi_process_sys_get_prop_image_version( int req_bytes; req_bytes = pkt->size - sizeof(*pkt); - if (req_bytes < version_string_size || + if (req_bytes < (signed int)version_string_size || !pkt->rg_property_data[1] || pkt->num_properties > 1) { dprintk(CVP_ERR, "%s: bad_pkt: %d\n", __func__, req_bytes); From 12a570651b002c870ada2982a8d80b936fd0f1b7 Mon Sep 17 00:00:00 2001 From: ptak Date: Thu, 11 Jul 2024 17:07:43 +0530 Subject: [PATCH 04/18] msm: eva: Fix UAF issue when remove module Should use different way to get the core info for different device. Change-Id: I8231d08afa75a1f47781f54ec2e5fa264820cc9e Signed-off-by: ptak --- drivers/media/platform/msm/cvp/cvp.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/media/platform/msm/cvp/cvp.c b/drivers/media/platform/msm/cvp/cvp.c index cf3e4b530d99..87adb4fbd975 100644 --- a/drivers/media/platform/msm/cvp/cvp.c +++ b/drivers/media/platform/msm/cvp/cvp.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include @@ -491,7 +492,11 @@ static int msm_cvp_remove(struct platform_device *pdev) return -EINVAL; } - core = dev_get_drvdata(&pdev->dev); + if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-cvp")) + core = dev_get_drvdata(&pdev->dev); + else + core = dev_get_drvdata(pdev->dev.parent); + if (!core) { dprintk(CVP_ERR, "%s invalid core", __func__); return -EINVAL; From c360f7cb0cd4d19a3d63d79842e2cea2df99dcff Mon Sep 17 00:00:00 2001 From: Manaf Meethalavalappu Pallikunhi Date: Wed, 5 Jun 2024 13:52:11 +0530 Subject: [PATCH 05/18] thermal: qcom: Add support to update tsens trip based on nvmem data Add support to detect higher thermal profile parts and update thermal zone trips dynamically based on nvmem cell data for tsens. Change-Id: I792c4f2736d10d68b45cc9b64c0ec08d185cf007 Signed-off-by: Manaf Meethalavalappu Pallikunhi --- drivers/thermal/msm-tsens.c | 90 +++++++++++++++++++++++++++++++++++++ drivers/thermal/tsens.h | 13 ++++++ 2 files changed, 103 insertions(+) diff --git a/drivers/thermal/msm-tsens.c b/drivers/thermal/msm-tsens.c index c660a05761af..167d0fba08e3 100644 --- a/drivers/thermal/msm-tsens.c +++ b/drivers/thermal/msm-tsens.c @@ -1,10 +1,12 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include #include +#include #include #include #include @@ -205,10 +207,95 @@ static int get_device_tree_data(struct platform_device *pdev, return rc; } +static void tsens_thermal_zone_trip_update(struct tsens_device *tmdev, + struct thermal_zone_device *tz, + const struct thermal_trip *trip, int trip_id) +{ + int ret = 0; + u32 trip_delta = 0; + int trip_temp; + + if (trip->type == THERMAL_TRIP_CRITICAL) + return; + + if (strnstr(tz->type, "cpu", sizeof(tz->type))) + trip_delta = TSENS_ELEVATE_CPU_DELTA; + else + trip_delta = TSENS_ELEVATE_DELTA; + + trip_temp = trip->temperature + trip_delta; + if (tz->ops->set_trip_temp) { + ret = tz->ops->set_trip_temp(tz, trip_id, trip_temp); + if (ret) { + dev_err(tmdev->dev, "%s: failed to set trip%d for %s\n", + __func__, trip_id, tz->type); + return; + } + } + thermal_zone_device_update(tz, THERMAL_TRIP_CHANGED); +} + +static int tsens_nvmem_trip_update(struct tsens_device *tmdev, + struct thermal_zone_device *tz) +{ + int i, num_trips = 0; + const struct thermal_trip *trips = NULL; + + if (strnstr(tz->type, "mdmss", sizeof(tz->type)) || + !strnstr(tz->governor->name, "step_wise", + sizeof(tz->governor->name))) + return 0; + + if (!tz->ops->set_trip_temp) { + dev_err(tmdev->dev, "%s: No set_trip_temp ops support for %s\n", + __func__, tz->type); + return -EINVAL; + } + + num_trips = of_thermal_get_ntrips(tz); + trips = of_thermal_get_trip_points(tz); + for (i = 0; i < num_trips; i++) + tsens_thermal_zone_trip_update(tmdev, tz, &trips[i], i); + + return 0; +} + +static bool tsens_is_nvmem_trip_update_needed(struct tsens_device *tmdev) +{ + int ret; + u32 chipinfo, tsens_jtag; + u8 tsens_feat_id; + + if (!of_property_read_bool(tmdev->dev->of_node, "nvmem-cells")) + return false; + + ret = nvmem_cell_read_u32(tmdev->dev, "tsens_chipinfo", &chipinfo); + if (ret) { + dev_err(tmdev->dev, + "%s: Not able to read tsens_chipinfo nvmem, ret:%d\n", + __func__, ret); + return false; + } + + tsens_jtag = chipinfo & GENMASK(19, 0); + 
tsens_feat_id = (chipinfo >> TSENS_FEAT_OFFSET) & GENMASK(7, 0); + dev_dbg(tmdev->dev, "chipinfo:0x%x tsens_jtag: 0x%x tsens_feat_id:0x%x", + chipinfo, tsens_jtag, tsens_feat_id); + if ((tsens_jtag == TSENS_CHIP_ID0 && tsens_feat_id == TSENS_FEAT_ID3) || + (tsens_jtag == TSENS_CHIP_ID1 && tsens_feat_id == TSENS_FEAT_ID4) || + (tsens_jtag == TSENS_CHIP_ID2 && tsens_feat_id == TSENS_FEAT_ID3) || + (tsens_jtag == TSENS_CHIP_ID3 && tsens_feat_id == TSENS_FEAT_ID2)) + return true; + + return false; +} + static int tsens_thermal_zone_register(struct tsens_device *tmdev) { int i = 0, sensor_missing = 0; + tmdev->need_trip_update = tsens_is_nvmem_trip_update_needed(tmdev); + for (i = 0; i < TSENS_MAX_SENSORS; i++) { tmdev->sensor[i].tmdev = tmdev; tmdev->sensor[i].hw_id = i; @@ -222,6 +309,9 @@ static int tsens_thermal_zone_register(struct tsens_device *tmdev) sensor_missing++; continue; } + if (tmdev->need_trip_update) + tsens_nvmem_trip_update(tmdev, + tmdev->sensor[i].tzd); } else { pr_debug("Sensor not enabled:%d\n", i); } diff --git a/drivers/thermal/tsens.h b/drivers/thermal/tsens.h index ddb7e232aa67..c5479bcee61a 100644 --- a/drivers/thermal/tsens.h +++ b/drivers/thermal/tsens.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef __QCOM_TSENS_H__ @@ -35,6 +36,17 @@ #define TSENS_DRIVER_NAME "msm-tsens" +#define TSENS_FEAT_OFFSET 20 +#define TSENS_CHIP_ID0 0x197 +#define TSENS_CHIP_ID1 0x198 +#define TSENS_CHIP_ID2 0x20e +#define TSENS_CHIP_ID3 0x20f +#define TSENS_FEAT_ID2 0x2 +#define TSENS_FEAT_ID3 0x3 +#define TSENS_FEAT_ID4 0x4 +#define TSENS_ELEVATE_DELTA 10000 +#define TSENS_ELEVATE_CPU_DELTA 5000 + enum tsens_trip_type { TSENS_TRIP_CONFIGURABLE_HI = 4, TSENS_TRIP_CONFIGURABLE_LOW @@ -218,6 +230,7 @@ struct tsens_device { int trdy_fail_ctr; struct tsens_sensor zeroc; u8 zeroc_sensor_id; + bool need_trip_update; struct workqueue_struct *tsens_reinit_work; struct work_struct therm_fwk_notify; bool tsens_reinit_wa; From ed97e9cf1d7ef8b9c5d34f40db1b32bfce0ce6f4 Mon Sep 17 00:00:00 2001 From: Daniel Perez-Zoghbi Date: Mon, 22 Jul 2024 16:13:32 -0700 Subject: [PATCH 06/18] qcedev: fix UAF in qcedev_smmu External researcher found UAF in qcedev_smmu.c on an error condition in qcedev_check_and_map_buffer. When an error occurs, we free binfo, but it is still kept in the registeredbufs list. The fix removes it from the list before freeing binfo. 
Change-Id: I0327e456bd46106b12c36a5a21305407aae428dd Signed-off-by: Daniel Perez-Zoghbi --- drivers/crypto/msm/qcedev_smmu.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/msm/qcedev_smmu.c b/drivers/crypto/msm/qcedev_smmu.c index 50cd945b0c73..8d4844becc85 100644 --- a/drivers/crypto/msm/qcedev_smmu.c +++ b/drivers/crypto/msm/qcedev_smmu.c @@ -350,8 +350,12 @@ int qcedev_check_and_map_buffer(void *handle, return 0; unmap: - if (!found) + if (!found) { qcedev_unmap_buffer(handle, mem_client, binfo); + mutex_lock(&qce_hndl->registeredbufs.lock); + list_del(&binfo->list); + mutex_unlock(&qce_hndl->registeredbufs.lock); + } error: kfree(binfo); From 01f564dfc631489d652fe1fdacb8f096e31390d6 Mon Sep 17 00:00:00 2001 From: Nishant Pandey Date: Tue, 23 Jul 2024 22:31:28 -0700 Subject: [PATCH 07/18] securemsm-kernel: Decrement the server object ref count in mutex context Decrement the smcinvoke server object refcount in mutex context so that we never get an object which is being freed. Change-Id: I1bab3d630436923c7eb60f2d46dcc3f2bd037097 Signed-off-by: Nishant Pandey --- drivers/soc/qcom/smcinvoke.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/soc/qcom/smcinvoke.c b/drivers/soc/qcom/smcinvoke.c index 73f17e15da9f..3f59854e8259 100644 --- a/drivers/soc/qcom/smcinvoke.c +++ b/drivers/soc/qcom/smcinvoke.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #define pr_fmt(fmt) "smcinvoke: %s: " fmt, __func__ @@ -1866,8 +1866,11 @@ static long process_accept_req(struct file *filp, unsigned int cmd, } } while (!cb_txn); out: - if (server_info) + if (server_info) { + mutex_lock(&g_smcinvoke_lock); kref_put(&server_info->ref_cnt, destroy_cb_server); + mutex_unlock(&g_smcinvoke_lock); + } if (ret && ret != -ERESTARTSYS) pr_err("accept thread returning with ret: %d\n", ret); From 752e583b65efea2b18a10ba685cf722f8f1e4198 Mon Sep 17 00:00:00 2001 From: Himansu Nayak Date: Thu, 1 Aug 2024 18:33:16 +0530 Subject: [PATCH 08/18] msm_ipa: Install exception rule for PPPoE-MPLS Add code to install exception rule for icmp and dhcp packet in DL direction for v4 and v6. Change-Id: I27a2c3cb9cb342a5e4d22246e350bf721d784a15 Signed-off-by: Himansu Nayak --- include/uapi/linux/msm_ipa.h | 45 +++++++++++++++++++++++++++++++++++- 1 file changed, 44 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h index bd2a5920b654..8d2bb198379d 100644 --- a/include/uapi/linux/msm_ipa.h +++ b/include/uapi/linux/msm_ipa.h @@ -1140,6 +1140,48 @@ static inline const char *exception_type_as_str(enum ipa_exception_type t) "???"; } +/** + * Macro ipa_exception_type_pppoe + * + * This macro is for describing which field is to be looked at for + * exception path consideration. + * + * NOTE 1: The field implies an offset into the packet under + * consideration. This offset will be calculated on behalf of + * the user of this API. + * + * NOTE 2: When exceptions are generated/sent in an ipa_exception + * structure, they will considered to be from the upload + * perspective. And when appropriate, a corresponding, and + * perhaps inverted, downlink exception will be automatically + * created on the callers behalf. 
As an example: If a + * FIELD_UDP_SRC_PORT is sent, an uplink exception will be + * created for udp source port, and a corresponding + * FIELD_UDP_DST_PORT will be automatically created for the + * downlink. + */ +#define FIELD_IP_PROTOCOL_PPPOE (FIELD_ETHER_TYPE + 1) +#define FIELD_TCP_SRC_PORT_PPPOE (FIELD_IP_PROTOCOL_PPPOE + 1) +#define FIELD_TCP_DST_PORT_PPPOE (FIELD_TCP_SRC_PORT_PPPOE + 1) +#define FIELD_UDP_SRC_PORT_PPPOE (FIELD_TCP_DST_PORT_PPPOE + 1) +#define FIELD_UDP_DST_PORT_PPPOE (FIELD_UDP_SRC_PORT_PPPOE + 1) +#define FIELD_ETHER_TYPE_PPPOE (FIELD_UDP_DST_PORT_PPPOE + 1) +#define FIELD_PPPOE_MAX (FIELD_ETHER_TYPE_PPPOE + 1) + +/* Function to read PPPoE exception in string format */ +static inline const char *pppoe_exception_type_as_str(uint32_t t) +{ + return + (t == FIELD_IP_PROTOCOL_PPPOE) ? "pppoe_ip_protocol" : + (t == FIELD_TCP_SRC_PORT_PPPOE) ? "pppoe_tcp_src_port" : + (t == FIELD_TCP_DST_PORT_PPPOE) ? "pppoe_tcp_dst_port" : + (t == FIELD_UDP_SRC_PORT_PPPOE) ? "pppoe_udp_src_port" : + (t == FIELD_UDP_DST_PORT_PPPOE) ? "pppoe_udp_dst_port" : + (t == FIELD_ETHER_TYPE_PPPOE) ? "pppoe_ether_type" : + (t == FIELD_PPPOE_MAX) ? "pppoe_max" : + "???"; +} + #define IP_TYPE_EXCEPTION(x) \ ((x) == FIELD_IP_PROTOCOL || \ (x) == FIELD_TCP_SRC_PORT || \ @@ -1220,6 +1262,7 @@ struct ipa_field_val_equation_gen { * @payload_length: Payload length. * @ext_attrib_mask: Extended attributes. 
* @l2tp_udp_next_hdr: next header in L2TP tunneling + * @p_exception : exception to enable for mpls-pppoe * @field_val_equ: for finding a value at a particular offset */ struct ipa_rule_attrib { @@ -1265,7 +1308,7 @@ struct ipa_rule_attrib { __u16 payload_length; __u32 ext_attrib_mask; __u8 l2tp_udp_next_hdr; - __u8 padding1; + __u8 p_exception; struct ipa_field_val_equation_gen fld_val_eq; }; From d62bca7bf4f05954fd7006f79d166b7be1d27bff Mon Sep 17 00:00:00 2001 From: Qiang Yu Date: Fri, 28 Jul 2023 09:46:33 +0800 Subject: [PATCH 09/18] msm: ep_pcie: Prevent repetitive wake operation if wake is in process Sometimes, device receives two consecutive wake-up events, added into a workqueue. Then device assert WAKE# and host deassert PERST# if device in D3cold state, triggering deassert perst IRQ. In IRQ thread, device flush the workqueue to make sure previous d3cold process has completed before enable endpoint. commit 43917f862f7d ("msm: mhi_dev: Flush workqueue before processing PERST deassert"). However, the second wake event is also in the workqueue, so ep_pcie_core_wakeup_host_internal is invoked and seeing dev->perst_deast is true, setted by deassert PERST# IRQ. Then device goes to access MHI register to issue inband PME, leading to NOC error because endpoint is still disabled. So add a check to prevent wake operation if a previous wake has completed. 10567.834470: [0x8219195 mhi_sm_dev_event_manager] Handling MHI_DEV_EVENT_CORE_WAKEUP event, current states: M3 & D3_COLD_STATE 10567.834498: ep_pcie_core_toggle_wake_gpio: PCIe V1711211: No. 115 to assert PCIe WAKE#; perst is asserted; D3hot is received, WAKE GPIO state:0 10567.834507: ep_pcie_core_wakeup_host_internal: PCIe V1711211: Set wake pending : 1 and return ; perst is not de-asserted; D3hot is set 10567.849704: [0x8219195 mhi_dev_notify_sm_event] received: MHI_DEV_EVENT_HW_ACC_WAKEUP 10567.849976: ep_pcie_handle_perst_irq: PCIe V1711211: No. 
1018 PERST deassertion 10567.850053: [0x8219195 mhi_sm_dev_event_manager] Handling MHI_DEV_EVENT_HW_ACC_WAKEUP event, current states: M3 & D3_COLD_STATE 10567.850071: ep_pcie_core_wakeup_host_internal: PCIe V1711211: request to assert WAKE# when in D3hot 10567.860093: ep_pcie_core_issue_inband_pme: PCIe V1711211: request to assert inband wake. Change-Id: I85fb37c4171c5ef4974c573f0abba199cb718a84 Signed-off-by: Qiang Yu --- drivers/platform/msm/ep_pcie/ep_pcie_core.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_core.c b/drivers/platform/msm/ep_pcie/ep_pcie_core.c index d0b8e3f386a3..f330e21ac46f 100644 --- a/drivers/platform/msm/ep_pcie/ep_pcie_core.c +++ b/drivers/platform/msm/ep_pcie/ep_pcie_core.c @@ -3236,6 +3236,9 @@ static int ep_pcie_core_wakeup_host_internal(enum ep_pcie_event event) { struct ep_pcie_dev_t *dev = &ep_pcie_dev; + if (atomic_read(&dev->host_wake_pending)) + return 0; + if (!atomic_read(&dev->perst_deast)) { /*D3 cold handling*/ ep_pcie_core_toggle_wake_gpio(true); From ccba394a902e9a076a84f46667a2b2837f772910 Mon Sep 17 00:00:00 2001 From: Sai Chaitanya Kaveti Date: Fri, 15 Dec 2023 14:33:19 +0530 Subject: [PATCH 10/18] msm: ep_pcie: Avoid setting host wake pending flag for D0 In current implementation, when host wake request is received in D0 and M3 states, the following sequence of events are happening causing next host wake request from IPA/ client to fail. Sequence of events: 1. Device is in waking up process in D0, M3 states and expecting M0 next. 2. Wake up request received as device in M3. 3. Host wake API is executed setting host_wake_pending flag as well. 4. M0 received as part of wake up from 1. 5. Device in D0, M0 states. 6. Device again went to suspend state as no transfers are happening. 7. Device in D3cold, M3 states 8. Wake up request received from IPA. 9. Host wake API is called again but its returning without any operation as host_wake_pending flag is set. wake toggle is not done. 
10. host_wake_pending flag is cleared only on receiving next D0. 11. Host wake requests are failing because of 9. To handle this, avoiding setting of host_wake_pending flag when the host wake request is received in D0 state. Change-Id: I83acde55e6c116653c3ed00e6b4560e3db6390bd Signed-off-by: Sai Chaitanya Kaveti --- drivers/platform/msm/ep_pcie/ep_pcie_core.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_core.c b/drivers/platform/msm/ep_pcie/ep_pcie_core.c index f330e21ac46f..07f605af0254 100644 --- a/drivers/platform/msm/ep_pcie/ep_pcie_core.c +++ b/drivers/platform/msm/ep_pcie/ep_pcie_core.c @@ -3236,8 +3236,10 @@ static int ep_pcie_core_wakeup_host_internal(enum ep_pcie_event event) { struct ep_pcie_dev_t *dev = &ep_pcie_dev; - if (atomic_read(&dev->host_wake_pending)) + if (atomic_read(&dev->host_wake_pending)) { + EP_PCIE_DBG(dev, "PCIe V%d: Host wake is already pending, returning\n", dev->rev); return 0; + } if (!atomic_read(&dev->perst_deast)) { /*D3 cold handling*/ @@ -3249,12 +3251,14 @@ static int ep_pcie_core_wakeup_host_internal(enum ep_pcie_event event) */ dev->wake_from_d3cold = true; } + atomic_set(&dev->host_wake_pending, 1); } else if (dev->l23_ready) { EP_PCIE_ERR(dev, "PCIe V%d: request to assert WAKE# when in D3hot\n", dev->rev); /*D3 hot handling*/ ep_pcie_core_issue_inband_pme(); + atomic_set(&dev->host_wake_pending, 1); } else { /*D0 handling*/ EP_PCIE_ERR(dev, @@ -3262,7 +3266,6 @@ static int ep_pcie_core_wakeup_host_internal(enum ep_pcie_event event) dev->rev); } - atomic_set(&dev->host_wake_pending, 1); EP_PCIE_DBG(dev, "PCIe V%d: Set wake pending : %d and return ; perst is %s de-asserted; D3hot is %s set\n", dev->rev, atomic_read(&dev->host_wake_pending), From 63a32bf3617369ad3546241a57753bebcbf67631 Mon Sep 17 00:00:00 2001 From: Prashanth K Date: Fri, 28 Apr 2023 12:37:42 +0530 Subject: [PATCH 11/18] usb: gadget: f_gsi: bail out if opts is null Currently, functions 
gsi_inst_clean & gsi_free_inst utilises gsi_opts without any check, however there is a possibility that the opts structure could become NULL. In such case, due to lack of if checks can result in NULL pointer dereference. Change-Id: I548690e2eee377b5292f258972ae7e38417f3085 Signed-off-by: Prashanth K --- drivers/usb/gadget/function/f_gsi.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c index 1111b88c6516..fcfe9a9bdced 100644 --- a/drivers/usb/gadget/function/f_gsi.c +++ b/drivers/usb/gadget/function/f_gsi.c @@ -3849,6 +3849,9 @@ static struct config_item_type gsi_func_rmnet_type = { static void gsi_inst_clean(struct gsi_opts *opts) { + if (!opts) + return; + if (opts->gsi->c_port.cdev.dev) { struct cdev *cdev = &opts->gsi->c_port.cdev; struct f_gsi *gsi = opts->gsi; @@ -3962,7 +3965,7 @@ static void gsi_free_inst(struct usb_function_instance *f) enum ipa_usb_teth_prot prot_id; struct f_gsi *gsi; - if (!opts->gsi) + if (!opts || !opts->gsi) return; prot_id = opts->gsi->prot_id; From de9f4fe6f82c64f7a2d06d6ecd8d1edd398bf10e Mon Sep 17 00:00:00 2001 From: ANANDU KRISHNAN E Date: Tue, 20 Aug 2024 17:21:05 +0530 Subject: [PATCH 12/18] msm: adsprpc: Avoid taking reference for group_info Currently, the get_current_groups API accesses group info, which increases the usage refcount. If the IOCTL using the get_current_groups API is called many times, the usage counter overflows. To avoid this, access group info without taking a reference. A reference is not required as group info is not released during the IOCTL call. 
Change-Id: Ib4de80cac8b36f73d8f5c6dd9824722153189285 Signed-off-by: ANANDU KRISHNAN E --- drivers/char/adsprpc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c index 236a22608547..0664e70286e3 100644 --- a/drivers/char/adsprpc.c +++ b/drivers/char/adsprpc.c @@ -5806,7 +5806,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) static int fastrpc_get_process_gids(struct gid_list *gidlist) { - struct group_info *group_info = get_current_groups(); + struct group_info *group_info = current_cred()->group_info; int i = 0, err = 0, num_gids = group_info->ngroups + 1; unsigned int *gids = NULL; From c6e7698c0cf35551ab16d16ac5e21e2272644734 Mon Sep 17 00:00:00 2001 From: Santosh Sakore Date: Tue, 20 Aug 2024 12:31:31 +0530 Subject: [PATCH 13/18] adsprpc: Handle UAF scenario in put_args Currently, the DSP updates header buffers with unused DMA handle fds. In the put_args section, if any DMA handle FDs are present in the header buffer, the corresponding map is freed. However, since the header buffer is exposed to users in unsigned PD, users can update invalid FDs. If this invalid FD matches with any FD that is already in use, it could lead to a use-after-free (UAF) vulnerability. As a solution,add DMA handle references for DMA FDs, and the map for the FD will be freed only when a reference is found. 
Acked-by: Om Deore Change-Id: I19ae21230bf11fe89858b10c9069a5daccabc392 Signed-off-by: Santosh Sakore --- drivers/char/adsprpc.c | 71 +++++++++++++++++++++++++++++++----------- 1 file changed, 53 insertions(+), 18 deletions(-) diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c index 236a22608547..c13641940079 100644 --- a/drivers/char/adsprpc.c +++ b/drivers/char/adsprpc.c @@ -584,6 +584,8 @@ struct fastrpc_mmap { struct timespec64 map_end_time; bool is_filemap; /* flag to indicate map used in process init */ unsigned int ctx_refs; /* Indicates reference count for context map */ + /* Map in use for dma handle */ + unsigned int dma_handle_refs; }; enum fastrpc_perfkeys { @@ -1213,9 +1215,14 @@ static int fastrpc_mmap_remove(struct fastrpc_file *fl, int fd, uintptr_t va, return 0; } hlist_for_each_entry_safe(map, n, &fl->maps, hn) { - /* Remove if only one reference map and no context map */ - if (map->refs == 1 && !map->ctx_refs && - map->raddr == va && map->raddr + map->len == va + len && + if ((fd < 0 || map->fd == fd) && + map->raddr == va && + map->raddr + map->len == va + len && + /* Remove if only one reference map and no context map */ + map->refs == 1 && + !map->ctx_refs && + /* Remove map only if it isn't being used by DSP */ + !map->dma_handle_refs && /* Remove map if not used in process initialization */ !map->is_filemap) { match = map; @@ -1254,8 +1261,9 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags) if (map->flags == ADSP_MMAP_HEAP_ADDR || map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) { spin_lock(&me->hlock); - map->refs--; - if (!map->refs && !map->is_persistent && !map->ctx_refs) + if (map->refs) + map->refs--; + if (!map->refs && !map->is_persistent) hlist_del_init(&map->hn); spin_unlock(&me->hlock); if (map->refs > 0) { @@ -1270,8 +1278,13 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags) spin_unlock(&me->hlock); } } else { - map->refs--; - if (!map->refs && !map->ctx_refs) + if (map->refs) 
+ map->refs--; + /* flags is passed as 1 during fastrpc_file_free + * (ie process exit), so that maps will be cleared + * even though references are present. + */ + if (!map->refs && !map->ctx_refs && !map->dma_handle_refs) hlist_del_init(&map->hn); if (map->refs > 0 && !flags) return; @@ -2492,12 +2505,13 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx) FASTRPC_ATTR_NOVA, 0, 0, dmaflags, &ctx->maps[i]); if (!err && ctx->maps[i]) - ctx->maps[i]->ctx_refs++; + ctx->maps[i]->dma_handle_refs++; if (err) { for (j = bufs; j < i; j++) { - if (ctx->maps[j] && ctx->maps[j]->ctx_refs) - ctx->maps[j]->ctx_refs--; - fastrpc_mmap_free(ctx->maps[j], 0); + if (ctx->maps[j] && ctx->maps[j]->dma_handle_refs) { + ctx->maps[j]->dma_handle_refs--; + fastrpc_mmap_free(ctx->maps[j], 0); + } } mutex_unlock(&ctx->fl->map_mutex); goto bail; @@ -2635,13 +2649,33 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx) rpra[i].buf.pv = buf; } PERF_END); + /* Since we are not holidng map_mutex during get args whole time + * it is possible that dma handle map may be removed by some invalid + * fd passed by DSP. 
Inside the lock check if the map present or not + */ + mutex_lock(&ctx->fl->map_mutex); for (i = bufs; i < bufs + handles; ++i) { - struct fastrpc_mmap *map = ctx->maps[i]; - if (map) { - pages[i].addr = map->phys; - pages[i].size = map->size; + struct fastrpc_mmap *mmap = NULL; + /* check if map was created */ + if (ctx->maps[i]) { + /* check if map still exist */ + if (!fastrpc_mmap_find(ctx->fl, ctx->fds[i], 0, 0, + 0, 0, &mmap)) { + if (mmap) { + pages[i].addr = mmap->phys; + pages[i].size = mmap->size; + } + + } else { + /* map already freed by some other call */ + mutex_unlock(&ctx->fl->map_mutex); + ADSPRPC_ERR("could not find map associated with dma handle fd %d\n", + ctx->fds[i]); + goto bail; + } } } + mutex_unlock(&ctx->fl->map_mutex); fdlist = (uint64_t *)&pages[bufs + handles]; crclist = (uint32_t *)&fdlist[M_FDLIST]; /* reset fds, crc and early wakeup hint memory */ @@ -2842,9 +2876,10 @@ static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx, break; if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0, 0, 0, &mmap)) { - if (mmap && mmap->ctx_refs) - mmap->ctx_refs--; - fastrpc_mmap_free(mmap, 0); + if (mmap && mmap->dma_handle_refs) { + mmap->dma_handle_refs = 0; + fastrpc_mmap_free(mmap, 0); + } } } mutex_unlock(&ctx->fl->map_mutex); From 54d3e3ca110802169d63fbc5d85c58c3d0bd99ad Mon Sep 17 00:00:00 2001 From: Rajashekar kuruva Date: Tue, 7 Nov 2023 11:27:49 +0530 Subject: [PATCH 14/18] USB: storage: Replace the sprintf with scnprintf 'sprintf' has been deprecated, hence replace it with a safer function scnprintf. 
Change-Id: I9bc8e3dfd2032a0447f38fc98a3ad31d9d609cab Signed-off-by: Rajashekar kuruva Signed-off-by: Prashanth K --- drivers/usb/storage/freecom.c | 16 +++++++------- drivers/usb/storage/sddr09.c | 39 ++++++++++++++++++++++------------- 2 files changed, 33 insertions(+), 22 deletions(-) diff --git a/drivers/usb/storage/freecom.c b/drivers/usb/storage/freecom.c index 34e7eaff1174..cc0afbd3daba 100644 --- a/drivers/usb/storage/freecom.c +++ b/drivers/usb/storage/freecom.c @@ -494,12 +494,12 @@ static void pdump(struct us_data *us, void *ibuffer, int length) unsigned char *buffer = (unsigned char *) ibuffer; int i, j; int from, base; - + size_t size = sizeof(line); offset = 0; for (i = 0; i < length; i++) { if ((i & 15) == 0) { if (i > 0) { - offset += sprintf (line+offset, " - "); + offset += scnprintf(line+offset, size-offset, " - "); for (j = i - 16; j < i; j++) { if (buffer[j] >= 32 && buffer[j] <= 126) line[offset++] = buffer[j]; @@ -510,11 +510,11 @@ static void pdump(struct us_data *us, void *ibuffer, int length) usb_stor_dbg(us, "%s\n", line); offset = 0; } - offset += sprintf (line+offset, "%08x:", i); + offset += scnprintf(line+offset, size-offset, "%08x:", i); } else if ((i & 7) == 0) { - offset += sprintf (line+offset, " -"); + offset += scnprintf(line+offset, size-offset, " -"); } - offset += sprintf (line+offset, " %02x", buffer[i] & 0xff); + offset += scnprintf(line+offset, size-offset, " %02x", buffer[i] & 0xff); } /* Add the last "chunk" of data. 
*/ @@ -522,10 +522,10 @@ static void pdump(struct us_data *us, void *ibuffer, int length) base = ((length - 1) / 16) * 16; for (i = from + 1; i < 16; i++) - offset += sprintf (line+offset, " "); + offset += scnprintf(line+offset, size-offset, " "); if (from < 8) - offset += sprintf (line+offset, " "); - offset += sprintf (line+offset, " - "); + offset += scnprintf(line+offset, size-offset, " "); + offset += scnprintf(line+offset, size-offset, " - "); for (i = 0; i <= from; i++) { if (buffer[base+i] >= 32 && buffer[base+i] <= 126) diff --git a/drivers/usb/storage/sddr09.c b/drivers/usb/storage/sddr09.c index 51bcd4a43690..8d20eaac9e92 100644 --- a/drivers/usb/storage/sddr09.c +++ b/drivers/usb/storage/sddr09.c @@ -1139,6 +1139,8 @@ sddr09_get_cardinfo(struct us_data *us, unsigned char flags) { unsigned char deviceID[4]; char blurbtxt[256]; int result; + size_t size = sizeof(blurbtxt); + int len; usb_stor_dbg(us, "Reading capacity...\n"); @@ -1150,10 +1152,11 @@ sddr09_get_cardinfo(struct us_data *us, unsigned char flags) { return NULL; } - sprintf(blurbtxt, "sddr09: Found Flash card, ID = %4ph", deviceID); + scnprintf(blurbtxt, size, "sddr09: Found Flash card, ID = %4ph", deviceID); /* Byte 0 is the manufacturer */ - sprintf(blurbtxt + strlen(blurbtxt), + len = strlen(blurbtxt); + scnprintf(blurbtxt + len, size - len, ": Manuf. %s", nand_flash_manufacturer(deviceID[0])); @@ -1165,29 +1168,34 @@ sddr09_get_cardinfo(struct us_data *us, unsigned char flags) { * 17301504 raw bytes, of which 16384000 are * usable for user data. 
*/ - sprintf(blurbtxt + strlen(blurbtxt), + len = strlen(blurbtxt); + scnprintf(blurbtxt + len, size - len, ", %d MB", 1<<(cardinfo->chipshift - 20)); } else { - sprintf(blurbtxt + strlen(blurbtxt), + len = strlen(blurbtxt); + scnprintf(blurbtxt + len, size - len, ", type unrecognized"); } /* Byte 2 is code to signal availability of 128-bit ID */ if (deviceID[2] == 0xa5) { - sprintf(blurbtxt + strlen(blurbtxt), + len = strlen(blurbtxt); + scnprintf(blurbtxt + len, size - len, ", 128-bit ID"); } /* Byte 3 announces the availability of another read ID command */ if (deviceID[3] == 0xc0) { - sprintf(blurbtxt + strlen(blurbtxt), + len = strlen(blurbtxt); + scnprintf(blurbtxt + len, size - len, ", extra cmd"); } - if (flags & SDDR09_WP) - sprintf(blurbtxt + strlen(blurbtxt), + if (flags & SDDR09_WP) { + len = strlen(blurbtxt); + scnprintf(blurbtxt + len, size - len, ", WP"); - + } printk(KERN_WARNING "%s\n", blurbtxt); return cardinfo; @@ -1538,7 +1546,8 @@ static int sddr09_transport(struct scsi_cmnd *srb, struct us_data *us) { static unsigned char sensekey = 0, sensecode = 0; static unsigned char havefakesense = 0; - int result, i; + int result, i, len; + size_t size; unsigned char *ptr = us->iobuf; unsigned long capacity; unsigned int page, pages; @@ -1701,9 +1710,11 @@ static int sddr09_transport(struct scsi_cmnd *srb, struct us_data *us) srb->cmnd[1] = LUNBITS; ptr[0] = 0; - for (i=0; i<12; i++) - sprintf(ptr+strlen(ptr), "%02X ", srb->cmnd[i]); - + size = sizeof(ptr); + for (i = 0; i < 12; i++) { + len = strlen(ptr); + scnprintf(ptr+len, size-len, "%02X ", srb->cmnd[i]); + } usb_stor_dbg(us, "Send control for command %s\n", ptr); result = sddr09_send_scsi_command(us, srb->cmnd, 12); @@ -1730,7 +1741,7 @@ static int sddr09_transport(struct scsi_cmnd *srb, struct us_data *us) return (result == USB_STOR_XFER_GOOD ? 
USB_STOR_TRANSPORT_GOOD : USB_STOR_TRANSPORT_ERROR); - } + } return USB_STOR_TRANSPORT_GOOD; } From f0e3f6408834c27366c3bb21686e968acc1e7385 Mon Sep 17 00:00:00 2001 From: Divisha Bisht Date: Wed, 28 Aug 2024 10:41:53 +0530 Subject: [PATCH 15/18] msm-5.4.c3: qseecom: Fix possible race condition Fix possible race condition in data->type value in case of multithreaded listener or app IOCTLs. For example, below could cause inconsistent data->type value while racing belows IOCTLs Thread1 with QSEECOM_IOCTL_REGISTER_LISTENER_REQ Thread2 with QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ. Change-Id: I436b63c044a66c324d94db27566a7be70981bd6b Signed-off-by: Divisha Bisht --- drivers/misc/qseecom.c | 104 +++++++++++++++++++++++++++++------------ 1 file changed, 75 insertions(+), 29 deletions(-) diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c index 2eeabe55d6ca..10dcb4691747 100644 --- a/drivers/misc/qseecom.c +++ b/drivers/misc/qseecom.c @@ -3,6 +3,7 @@ * QTI Secure Execution Environment Communicator (QSEECOM) driver * * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__ @@ -7619,14 +7620,15 @@ long qseecom_ioctl(struct file *file, switch (cmd) { case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: { + mutex_lock(&listener_access_lock); if (data->type != QSEECOM_GENERIC) { pr_err("reg lstnr req: invalid handle (%d)\n", data->type); + mutex_unlock(&listener_access_lock); ret = -EINVAL; break; } pr_debug("ioctl register_listener_req()\n"); - mutex_lock(&listener_access_lock); atomic_inc(&data->ioctl_count); data->type = QSEECOM_LISTENER_SERVICE; ret = qseecom_register_listener(data, argp); @@ -7638,15 +7640,16 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: { + mutex_lock(&listener_access_lock); if ((data->listener.id == 0) || (data->type != QSEECOM_LISTENER_SERVICE)) { pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n", data->type, data->listener.id); + mutex_unlock(&listener_access_lock); ret = -EINVAL; break; } pr_debug("ioctl unregister_listener_req()\n"); - mutex_lock(&listener_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_unregister_listener(data); atomic_dec(&data->ioctl_count); @@ -7657,15 +7660,16 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_IOCTL_SEND_CMD_REQ: { + /* Only one client allowed here at a time */ + mutex_lock(&app_access_lock); if ((data->client.app_id == 0) || (data->type != QSEECOM_CLIENT_APP)) { pr_err("send cmd req: invalid handle (%d) app_id(%d)\n", data->type, data->client.app_id); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } - /* Only one client allowed here at a time */ - mutex_lock(&app_access_lock); if (qseecom.support_bus_scaling) { /* register bus bw in case the client doesn't do it */ if (!data->mode) { @@ -7719,15 +7723,16 @@ long qseecom_ioctl(struct file *file, } case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ: case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: { + /* Only one client allowed here at a time */ + mutex_lock(&app_access_lock); if ((data->client.app_id == 0) || 
(data->type != QSEECOM_CLIENT_APP)) { pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n", data->type, data->client.app_id); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } - /* Only one client allowed here at a time */ - mutex_lock(&app_access_lock); if (qseecom.support_bus_scaling) { if (!data->mode) { mutex_lock(&qsee_bw_mutex); @@ -7783,13 +7788,16 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_IOCTL_RECEIVE_REQ: { + mutex_lock(&listener_access_lock); if ((data->listener.id == 0) || (data->type != QSEECOM_LISTENER_SERVICE)) { pr_err("receive req: invalid handle (%d), lid(%d)\n", data->type, data->listener.id); + mutex_unlock(&listener_access_lock); ret = -EINVAL; break; } + mutex_unlock(&listener_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_receive_req(data); atomic_dec(&data->ioctl_count); @@ -7799,14 +7807,15 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_IOCTL_SEND_RESP_REQ: { + mutex_lock(&listener_access_lock); if ((data->listener.id == 0) || (data->type != QSEECOM_LISTENER_SERVICE)) { pr_err("send resp req: invalid handle (%d), lid(%d)\n", data->type, data->listener.id); + mutex_unlock(&listener_access_lock); ret = -EINVAL; break; } - mutex_lock(&listener_access_lock); atomic_inc(&data->ioctl_count); if (!qseecom.qsee_reentrancy_support) ret = qseecom_send_resp(); @@ -7820,16 +7829,17 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: { + mutex_lock(&app_access_lock); if ((data->type != QSEECOM_CLIENT_APP) && (data->type != QSEECOM_GENERIC) && (data->type != QSEECOM_SECURE_SERVICE)) { pr_err("set mem param req: invalid handle (%d)\n", data->type); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data); - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_set_client_mem_param(data, argp); atomic_dec(&data->ioctl_count); @@ -7840,16 +7850,17 @@ long qseecom_ioctl(struct file 
*file, break; } case QSEECOM_IOCTL_LOAD_APP_REQ: { + mutex_lock(&app_access_lock); if ((data->type != QSEECOM_GENERIC) && (data->type != QSEECOM_CLIENT_APP)) { pr_err("load app req: invalid handle (%d)\n", data->type); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } data->type = QSEECOM_CLIENT_APP; pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data); - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_load_app(data, argp); atomic_dec(&data->ioctl_count); @@ -7860,15 +7871,16 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_IOCTL_UNLOAD_APP_REQ: { + mutex_lock(&app_access_lock); if ((data->client.app_id == 0) || (data->type != QSEECOM_CLIENT_APP)) { pr_err("unload app req:invalid handle(%d) app_id(%d)\n", data->type, data->client.app_id); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data); - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_unload_app(data, false); atomic_dec(&data->ioctl_count); @@ -7887,10 +7899,12 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_IOCTL_PERF_ENABLE_REQ:{ + mutex_lock(&app_access_lock); if ((data->type != QSEECOM_GENERIC) && (data->type != QSEECOM_CLIENT_APP)) { pr_err("perf enable req: invalid handle (%d)\n", data->type); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } @@ -7898,6 +7912,7 @@ long qseecom_ioctl(struct file *file, (data->client.app_id == 0)) { pr_err("perf enable req:invalid handle(%d) appid(%d)\n", data->type, data->client.app_id); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } @@ -7912,13 +7927,16 @@ long qseecom_ioctl(struct file *file, pr_err("Fail to vote for clocks %d\n", ret); } atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); break; } case QSEECOM_IOCTL_PERF_DISABLE_REQ:{ + mutex_lock(&app_access_lock); if ((data->type != QSEECOM_SECURE_SERVICE) && (data->type != QSEECOM_CLIENT_APP)) { pr_err("perf disable req: invalid 
handle (%d)\n", data->type); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } @@ -7926,6 +7944,7 @@ long qseecom_ioctl(struct file *file, (data->client.app_id == 0)) { pr_err("perf disable: invalid handle (%d)app_id(%d)\n", data->type, data->client.app_id); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } @@ -7939,6 +7958,7 @@ long qseecom_ioctl(struct file *file, mutex_unlock(&qsee_bw_mutex); } atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); break; } @@ -7948,28 +7968,32 @@ long qseecom_ioctl(struct file *file, pr_debug("crypto clock is not handled by HLOS\n"); break; } + mutex_lock(&app_access_lock); if ((data->client.app_id == 0) || (data->type != QSEECOM_CLIENT_APP)) { pr_err("set bus scale: invalid handle (%d) appid(%d)\n", data->type, data->client.app_id); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } atomic_inc(&data->ioctl_count); ret = qseecom_scale_bus_bandwidth(data, argp); atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); break; } case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: { + mutex_lock(&app_access_lock); if (data->type != QSEECOM_GENERIC) { pr_err("load ext elf req: invalid client handle (%d)\n", data->type); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } data->type = QSEECOM_UNAVAILABLE_CLIENT_APP; data->released = true; - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_load_external_elf(data, argp); atomic_dec(&data->ioctl_count); @@ -7979,14 +8003,15 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: { + mutex_lock(&app_access_lock); if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) { pr_err("unload ext elf req: invalid handle (%d)\n", data->type); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } data->released = true; - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_unload_external_elf(data); atomic_dec(&data->ioctl_count); @@ -7996,15 +8021,16 @@ long 
qseecom_ioctl(struct file *file, break; } case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: { + mutex_lock(&app_access_lock); if ((data->type != QSEECOM_GENERIC) && (data->type != QSEECOM_CLIENT_APP)) { pr_err("app loaded query req: invalid handle (%d)\n", data->type); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } data->type = QSEECOM_CLIENT_APP; - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data); ret = qseecom_query_app_loaded(data, argp); @@ -8013,9 +8039,11 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: { + mutex_lock(&app_access_lock); if (data->type != QSEECOM_GENERIC) { pr_err("send cmd svc req: invalid handle (%d)\n", data->type); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } @@ -8023,9 +8051,9 @@ long qseecom_ioctl(struct file *file, if (qseecom.qsee_version < QSEE_VERSION_03) { pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n", qseecom.qsee_version); + mutex_unlock(&app_access_lock); return -EINVAL; } - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_send_service_cmd(data, argp); atomic_dec(&data->ioctl_count); @@ -8035,19 +8063,21 @@ long qseecom_ioctl(struct file *file, case QSEECOM_IOCTL_CREATE_KEY_REQ: { if (!(qseecom.support_pfe || qseecom.support_fde)) pr_err("Features requiring key init not supported\n"); + mutex_lock(&app_access_lock); if (data->type != QSEECOM_GENERIC) { pr_err("create key req: invalid handle (%d)\n", data->type); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } if (qseecom.qsee_version < QSEE_VERSION_05) { pr_err("Create Key feature unsupported: qsee ver %u\n", qseecom.qsee_version); + mutex_unlock(&app_access_lock); return -EINVAL; } data->released = true; - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_create_key(data, argp); if (ret) @@ -8060,19 +8090,21 @@ long qseecom_ioctl(struct file *file, case QSEECOM_IOCTL_WIPE_KEY_REQ: { if 
(!(qseecom.support_pfe || qseecom.support_fde)) pr_err("Features requiring key init not supported\n"); + mutex_lock(&app_access_lock); if (data->type != QSEECOM_GENERIC) { pr_err("wipe key req: invalid handle (%d)\n", data->type); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } if (qseecom.qsee_version < QSEE_VERSION_05) { pr_err("Wipe Key feature unsupported in qsee ver %u\n", qseecom.qsee_version); + mutex_unlock(&app_access_lock); return -EINVAL; } data->released = true; - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_wipe_key(data, argp); if (ret) @@ -8084,19 +8116,21 @@ long qseecom_ioctl(struct file *file, case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: { if (!(qseecom.support_pfe || qseecom.support_fde)) pr_err("Features requiring key init not supported\n"); + mutex_lock(&app_access_lock); if (data->type != QSEECOM_GENERIC) { pr_err("update key req: invalid handle (%d)\n", data->type); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } if (qseecom.qsee_version < QSEE_VERSION_05) { pr_err("Update Key feature unsupported in qsee ver %u\n", qseecom.qsee_version); + mutex_unlock(&app_access_lock); return -EINVAL; } data->released = true; - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_update_key_user_info(data, argp); if (ret) @@ -8106,14 +8140,15 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: { + mutex_lock(&app_access_lock); if (data->type != QSEECOM_GENERIC) { pr_err("save part hash req: invalid handle (%d)\n", data->type); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } data->released = true; - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_save_partition_hash(argp); atomic_dec(&data->ioctl_count); @@ -8121,14 +8156,15 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: { + mutex_lock(&app_access_lock); if (data->type != QSEECOM_GENERIC) { pr_err("ES activated 
req: invalid handle (%d)\n", data->type); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } data->released = true; - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_is_es_activated(argp); atomic_dec(&data->ioctl_count); @@ -8136,14 +8172,15 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: { + mutex_lock(&app_access_lock); if (data->type != QSEECOM_GENERIC) { pr_err("MDTP cipher DIP req: invalid handle (%d)\n", data->type); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } data->released = true; - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_mdtp_cipher_dip(argp); atomic_dec(&data->ioctl_count); @@ -8152,14 +8189,15 @@ long qseecom_ioctl(struct file *file, } case QSEECOM_IOCTL_SEND_MODFD_RESP: case QSEECOM_IOCTL_SEND_MODFD_RESP_64: { + mutex_lock(&listener_access_lock); if ((data->listener.id == 0) || (data->type != QSEECOM_LISTENER_SERVICE)) { pr_err("receive req: invalid handle (%d), lid(%d)\n", data->type, data->listener.id); + mutex_unlock(&listener_access_lock); ret = -EINVAL; break; } - mutex_lock(&listener_access_lock); atomic_inc(&data->ioctl_count); if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP) ret = qseecom_send_modfd_resp(data, argp); @@ -8174,20 +8212,22 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: { + /* Only one client allowed here at a time */ + mutex_lock(&app_access_lock); if ((data->client.app_id == 0) || (data->type != QSEECOM_CLIENT_APP)) { pr_err("Open session: invalid handle (%d) appid(%d)\n", data->type, data->client.app_id); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } if (qseecom.qsee_version < QSEE_VERSION_40) { pr_err("GP feature unsupported: qsee ver %u\n", qseecom.qsee_version); + mutex_unlock(&app_access_lock); return -EINVAL; } - /* Only one client allowed here at a time */ - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = 
qseecom_qteec_open_session(data, argp); atomic_dec(&data->ioctl_count); @@ -8199,20 +8239,22 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: { + /* Only one client allowed here at a time */ + mutex_lock(&app_access_lock); if ((data->client.app_id == 0) || (data->type != QSEECOM_CLIENT_APP)) { pr_err("Close session: invalid handle (%d) appid(%d)\n", data->type, data->client.app_id); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } if (qseecom.qsee_version < QSEE_VERSION_40) { pr_err("GP feature unsupported: qsee ver %u\n", qseecom.qsee_version); + mutex_unlock(&app_access_lock); return -EINVAL; } - /* Only one client allowed here at a time */ - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_qteec_close_session(data, argp); atomic_dec(&data->ioctl_count); @@ -8223,20 +8265,22 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: { + /* Only one client allowed here at a time */ + mutex_lock(&app_access_lock); if ((data->client.app_id == 0) || (data->type != QSEECOM_CLIENT_APP)) { pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n", data->type, data->client.app_id); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } if (qseecom.qsee_version < QSEE_VERSION_40) { pr_err("GP feature unsupported: qsee ver %u\n", qseecom.qsee_version); + mutex_unlock(&app_access_lock); return -EINVAL; } - /* Only one client allowed here at a time */ - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_qteec_invoke_modfd_cmd(data, argp); atomic_dec(&data->ioctl_count); @@ -8248,20 +8292,22 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: { + /* Only one client allowed here at a time */ + mutex_lock(&app_access_lock); if ((data->client.app_id == 0) || (data->type != QSEECOM_CLIENT_APP)) { pr_err("Cancel req: invalid handle (%d) appid(%d)\n", data->type, data->client.app_id); + 
mutex_unlock(&app_access_lock); ret = -EINVAL; break; } if (qseecom.qsee_version < QSEE_VERSION_40) { pr_err("GP feature unsupported: qsee ver %u\n", qseecom.qsee_version); + mutex_unlock(&app_access_lock); return -EINVAL; } - /* Only one client allowed here at a time */ - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_qteec_request_cancellation(data, argp); atomic_dec(&data->ioctl_count); From 8e0a90efc05b404afc340684cc7ba5faadf3661b Mon Sep 17 00:00:00 2001 From: Santosh Sakore Date: Tue, 20 Aug 2024 12:31:31 +0530 Subject: [PATCH 16/18] adsprpc: Handle UAF scenario in put_args Currently, the DSP updates header buffers with unused DMA handle fds. In the put_args section, if any DMA handle FDs are present in the header buffer, the corresponding map is freed. However, since the header buffer is exposed to users in unsigned PD, users can update invalid FDs. If this invalid FD matches with any FD that is already in use, it could lead to a use-after-free (UAF) vulnerability. As a solution, add DMA handle references for DMA FDs, and the map for the FD will be freed only when a reference is found. 
Acked-by: Om Deore Change-Id: I19ae21230bf11fe89858b10c9069a5daccabc392 Signed-off-by: Santosh Sakore (cherry picked from commit c6e7698c0cf35551ab16d16ac5e21e2272644734) --- drivers/char/adsprpc.c | 71 +++++++++++++++++++++++++++++++----------- 1 file changed, 53 insertions(+), 18 deletions(-) diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c index 236a22608547..c13641940079 100644 --- a/drivers/char/adsprpc.c +++ b/drivers/char/adsprpc.c @@ -584,6 +584,8 @@ struct fastrpc_mmap { struct timespec64 map_end_time; bool is_filemap; /* flag to indicate map used in process init */ unsigned int ctx_refs; /* Indicates reference count for context map */ + /* Map in use for dma handle */ + unsigned int dma_handle_refs; }; enum fastrpc_perfkeys { @@ -1213,9 +1215,14 @@ static int fastrpc_mmap_remove(struct fastrpc_file *fl, int fd, uintptr_t va, return 0; } hlist_for_each_entry_safe(map, n, &fl->maps, hn) { - /* Remove if only one reference map and no context map */ - if (map->refs == 1 && !map->ctx_refs && - map->raddr == va && map->raddr + map->len == va + len && + if ((fd < 0 || map->fd == fd) && + map->raddr == va && + map->raddr + map->len == va + len && + /* Remove if only one reference map and no context map */ + map->refs == 1 && + !map->ctx_refs && + /* Remove map only if it isn't being used by DSP */ + !map->dma_handle_refs && /* Remove map if not used in process initialization */ !map->is_filemap) { match = map; @@ -1254,8 +1261,9 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags) if (map->flags == ADSP_MMAP_HEAP_ADDR || map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) { spin_lock(&me->hlock); - map->refs--; - if (!map->refs && !map->is_persistent && !map->ctx_refs) + if (map->refs) + map->refs--; + if (!map->refs && !map->is_persistent) hlist_del_init(&map->hn); spin_unlock(&me->hlock); if (map->refs > 0) { @@ -1270,8 +1278,13 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags) spin_unlock(&me->hlock); } } else { 
- map->refs--; - if (!map->refs && !map->ctx_refs) + if (map->refs) + map->refs--; + /* flags is passed as 1 during fastrpc_file_free + * (ie process exit), so that maps will be cleared + * even though references are present. + */ + if (!map->refs && !map->ctx_refs && !map->dma_handle_refs) hlist_del_init(&map->hn); if (map->refs > 0 && !flags) return; @@ -2492,12 +2505,13 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx) FASTRPC_ATTR_NOVA, 0, 0, dmaflags, &ctx->maps[i]); if (!err && ctx->maps[i]) - ctx->maps[i]->ctx_refs++; + ctx->maps[i]->dma_handle_refs++; if (err) { for (j = bufs; j < i; j++) { - if (ctx->maps[j] && ctx->maps[j]->ctx_refs) - ctx->maps[j]->ctx_refs--; - fastrpc_mmap_free(ctx->maps[j], 0); + if (ctx->maps[j] && ctx->maps[j]->dma_handle_refs) { + ctx->maps[j]->dma_handle_refs--; + fastrpc_mmap_free(ctx->maps[j], 0); + } } mutex_unlock(&ctx->fl->map_mutex); goto bail; @@ -2635,13 +2649,33 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx) rpra[i].buf.pv = buf; } PERF_END); + /* Since we are not holding map_mutex during get args whole time + * it is possible that dma handle map may be removed by some invalid + * fd passed by DSP. 
Inside the lock check if the map present or not + */ + mutex_lock(&ctx->fl->map_mutex); for (i = bufs; i < bufs + handles; ++i) { - struct fastrpc_mmap *map = ctx->maps[i]; - if (map) { - pages[i].addr = map->phys; - pages[i].size = map->size; + struct fastrpc_mmap *mmap = NULL; + /* check if map was created */ + if (ctx->maps[i]) { + /* check if map still exist */ + if (!fastrpc_mmap_find(ctx->fl, ctx->fds[i], 0, 0, + 0, 0, &mmap)) { + if (mmap) { + pages[i].addr = mmap->phys; + pages[i].size = mmap->size; + } + + } else { + /* map already freed by some other call */ + mutex_unlock(&ctx->fl->map_mutex); + ADSPRPC_ERR("could not find map associated with dma handle fd %d\n", + ctx->fds[i]); + goto bail; + } } } + mutex_unlock(&ctx->fl->map_mutex); fdlist = (uint64_t *)&pages[bufs + handles]; crclist = (uint32_t *)&fdlist[M_FDLIST]; /* reset fds, crc and early wakeup hint memory */ @@ -2842,9 +2876,10 @@ static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx, break; if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0, 0, 0, &mmap)) { - if (mmap && mmap->ctx_refs) - mmap->ctx_refs--; - fastrpc_mmap_free(mmap, 0); + if (mmap && mmap->dma_handle_refs) { + mmap->dma_handle_refs = 0; + fastrpc_mmap_free(mmap, 0); + } } } mutex_unlock(&ctx->fl->map_mutex); From 20018f6ad21d6303bf944846106a428a9226bf39 Mon Sep 17 00:00:00 2001 From: ANANDU KRISHNAN E Date: Tue, 20 Aug 2024 17:21:05 +0530 Subject: [PATCH 17/18] msm: adsprpc: Avoid taking reference for group_info Currently, the get_current_groups API accesses group info, which increases the usage refcount. If the IOCTL using the get_current_groups API is called many times, the usage counter overflows. To avoid this, access group info without taking a reference. A reference is not required as group info is not released during the IOCTL call. 
Change-Id: Ib4de80cac8b36f73d8f5c6dd9824722153189285 Signed-off-by: ANANDU KRISHNAN E (cherry picked from commit de9f4fe6f82c64f7a2d06d6ecd8d1edd398bf10e) --- drivers/char/adsprpc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c index 236a22608547..0664e70286e3 100644 --- a/drivers/char/adsprpc.c +++ b/drivers/char/adsprpc.c @@ -5806,7 +5806,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) static int fastrpc_get_process_gids(struct gid_list *gidlist) { - struct group_info *group_info = get_current_groups(); + struct group_info *group_info = current_cred()->group_info; int i = 0, err = 0, num_gids = group_info->ngroups + 1; unsigned int *gids = NULL; From 3a617351ae80fd8dd268455501a585ef66b5d680 Mon Sep 17 00:00:00 2001 From: Divisha Bisht Date: Wed, 28 Aug 2024 10:41:53 +0530 Subject: [PATCH 18/18] msm-5.4.c3: qseecom: Fix possible race condition Fix possible race condition in data->type value in case of multithreaded listener or app IOCTLs. For example, below could cause inconsistent data->type value while racing the below IOCTLs Thread1 with QSEECOM_IOCTL_REGISTER_LISTENER_REQ Thread2 with QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ. Change-Id: I436b63c044a66c324d94db27566a7be70981bd6b Signed-off-by: Divisha Bisht (cherry picked from commit f0e3f6408834c27366c3bb21686e968acc1e7385) --- drivers/misc/qseecom.c | 104 +++++++++++++++++++++++++++++------------ 1 file changed, 75 insertions(+), 29 deletions(-) diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c index 2eeabe55d6ca..10dcb4691747 100644 --- a/drivers/misc/qseecom.c +++ b/drivers/misc/qseecom.c @@ -3,6 +3,7 @@ * QTI Secure Execution Environment Communicator (QSEECOM) driver * * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__ @@ -7619,14 +7620,15 @@ long qseecom_ioctl(struct file *file, switch (cmd) { case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: { + mutex_lock(&listener_access_lock); if (data->type != QSEECOM_GENERIC) { pr_err("reg lstnr req: invalid handle (%d)\n", data->type); + mutex_unlock(&listener_access_lock); ret = -EINVAL; break; } pr_debug("ioctl register_listener_req()\n"); - mutex_lock(&listener_access_lock); atomic_inc(&data->ioctl_count); data->type = QSEECOM_LISTENER_SERVICE; ret = qseecom_register_listener(data, argp); @@ -7638,15 +7640,16 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: { + mutex_lock(&listener_access_lock); if ((data->listener.id == 0) || (data->type != QSEECOM_LISTENER_SERVICE)) { pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n", data->type, data->listener.id); + mutex_unlock(&listener_access_lock); ret = -EINVAL; break; } pr_debug("ioctl unregister_listener_req()\n"); - mutex_lock(&listener_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_unregister_listener(data); atomic_dec(&data->ioctl_count); @@ -7657,15 +7660,16 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_IOCTL_SEND_CMD_REQ: { + /* Only one client allowed here at a time */ + mutex_lock(&app_access_lock); if ((data->client.app_id == 0) || (data->type != QSEECOM_CLIENT_APP)) { pr_err("send cmd req: invalid handle (%d) app_id(%d)\n", data->type, data->client.app_id); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } - /* Only one client allowed here at a time */ - mutex_lock(&app_access_lock); if (qseecom.support_bus_scaling) { /* register bus bw in case the client doesn't do it */ if (!data->mode) { @@ -7719,15 +7723,16 @@ long qseecom_ioctl(struct file *file, } case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ: case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: { + /* Only one client allowed here at a time */ + mutex_lock(&app_access_lock); if ((data->client.app_id == 0) || 
(data->type != QSEECOM_CLIENT_APP)) { pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n", data->type, data->client.app_id); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } - /* Only one client allowed here at a time */ - mutex_lock(&app_access_lock); if (qseecom.support_bus_scaling) { if (!data->mode) { mutex_lock(&qsee_bw_mutex); @@ -7783,13 +7788,16 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_IOCTL_RECEIVE_REQ: { + mutex_lock(&listener_access_lock); if ((data->listener.id == 0) || (data->type != QSEECOM_LISTENER_SERVICE)) { pr_err("receive req: invalid handle (%d), lid(%d)\n", data->type, data->listener.id); + mutex_unlock(&listener_access_lock); ret = -EINVAL; break; } + mutex_unlock(&listener_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_receive_req(data); atomic_dec(&data->ioctl_count); @@ -7799,14 +7807,15 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_IOCTL_SEND_RESP_REQ: { + mutex_lock(&listener_access_lock); if ((data->listener.id == 0) || (data->type != QSEECOM_LISTENER_SERVICE)) { pr_err("send resp req: invalid handle (%d), lid(%d)\n", data->type, data->listener.id); + mutex_unlock(&listener_access_lock); ret = -EINVAL; break; } - mutex_lock(&listener_access_lock); atomic_inc(&data->ioctl_count); if (!qseecom.qsee_reentrancy_support) ret = qseecom_send_resp(); @@ -7820,16 +7829,17 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: { + mutex_lock(&app_access_lock); if ((data->type != QSEECOM_CLIENT_APP) && (data->type != QSEECOM_GENERIC) && (data->type != QSEECOM_SECURE_SERVICE)) { pr_err("set mem param req: invalid handle (%d)\n", data->type); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data); - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_set_client_mem_param(data, argp); atomic_dec(&data->ioctl_count); @@ -7840,16 +7850,17 @@ long qseecom_ioctl(struct file 
*file, break; } case QSEECOM_IOCTL_LOAD_APP_REQ: { + mutex_lock(&app_access_lock); if ((data->type != QSEECOM_GENERIC) && (data->type != QSEECOM_CLIENT_APP)) { pr_err("load app req: invalid handle (%d)\n", data->type); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } data->type = QSEECOM_CLIENT_APP; pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data); - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_load_app(data, argp); atomic_dec(&data->ioctl_count); @@ -7860,15 +7871,16 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_IOCTL_UNLOAD_APP_REQ: { + mutex_lock(&app_access_lock); if ((data->client.app_id == 0) || (data->type != QSEECOM_CLIENT_APP)) { pr_err("unload app req:invalid handle(%d) app_id(%d)\n", data->type, data->client.app_id); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data); - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_unload_app(data, false); atomic_dec(&data->ioctl_count); @@ -7887,10 +7899,12 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_IOCTL_PERF_ENABLE_REQ:{ + mutex_lock(&app_access_lock); if ((data->type != QSEECOM_GENERIC) && (data->type != QSEECOM_CLIENT_APP)) { pr_err("perf enable req: invalid handle (%d)\n", data->type); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } @@ -7898,6 +7912,7 @@ long qseecom_ioctl(struct file *file, (data->client.app_id == 0)) { pr_err("perf enable req:invalid handle(%d) appid(%d)\n", data->type, data->client.app_id); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } @@ -7912,13 +7927,16 @@ long qseecom_ioctl(struct file *file, pr_err("Fail to vote for clocks %d\n", ret); } atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); break; } case QSEECOM_IOCTL_PERF_DISABLE_REQ:{ + mutex_lock(&app_access_lock); if ((data->type != QSEECOM_SECURE_SERVICE) && (data->type != QSEECOM_CLIENT_APP)) { pr_err("perf disable req: invalid 
handle (%d)\n", data->type); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } @@ -7926,6 +7944,7 @@ long qseecom_ioctl(struct file *file, (data->client.app_id == 0)) { pr_err("perf disable: invalid handle (%d)app_id(%d)\n", data->type, data->client.app_id); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } @@ -7939,6 +7958,7 @@ long qseecom_ioctl(struct file *file, mutex_unlock(&qsee_bw_mutex); } atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); break; } @@ -7948,28 +7968,32 @@ long qseecom_ioctl(struct file *file, pr_debug("crypto clock is not handled by HLOS\n"); break; } + mutex_lock(&app_access_lock); if ((data->client.app_id == 0) || (data->type != QSEECOM_CLIENT_APP)) { pr_err("set bus scale: invalid handle (%d) appid(%d)\n", data->type, data->client.app_id); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } atomic_inc(&data->ioctl_count); ret = qseecom_scale_bus_bandwidth(data, argp); atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); break; } case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: { + mutex_lock(&app_access_lock); if (data->type != QSEECOM_GENERIC) { pr_err("load ext elf req: invalid client handle (%d)\n", data->type); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } data->type = QSEECOM_UNAVAILABLE_CLIENT_APP; data->released = true; - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_load_external_elf(data, argp); atomic_dec(&data->ioctl_count); @@ -7979,14 +8003,15 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: { + mutex_lock(&app_access_lock); if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) { pr_err("unload ext elf req: invalid handle (%d)\n", data->type); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } data->released = true; - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_unload_external_elf(data); atomic_dec(&data->ioctl_count); @@ -7996,15 +8021,16 @@ long 
qseecom_ioctl(struct file *file, break; } case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: { + mutex_lock(&app_access_lock); if ((data->type != QSEECOM_GENERIC) && (data->type != QSEECOM_CLIENT_APP)) { pr_err("app loaded query req: invalid handle (%d)\n", data->type); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } data->type = QSEECOM_CLIENT_APP; - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data); ret = qseecom_query_app_loaded(data, argp); @@ -8013,9 +8039,11 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: { + mutex_lock(&app_access_lock); if (data->type != QSEECOM_GENERIC) { pr_err("send cmd svc req: invalid handle (%d)\n", data->type); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } @@ -8023,9 +8051,9 @@ long qseecom_ioctl(struct file *file, if (qseecom.qsee_version < QSEE_VERSION_03) { pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n", qseecom.qsee_version); + mutex_unlock(&app_access_lock); return -EINVAL; } - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_send_service_cmd(data, argp); atomic_dec(&data->ioctl_count); @@ -8035,19 +8063,21 @@ long qseecom_ioctl(struct file *file, case QSEECOM_IOCTL_CREATE_KEY_REQ: { if (!(qseecom.support_pfe || qseecom.support_fde)) pr_err("Features requiring key init not supported\n"); + mutex_lock(&app_access_lock); if (data->type != QSEECOM_GENERIC) { pr_err("create key req: invalid handle (%d)\n", data->type); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } if (qseecom.qsee_version < QSEE_VERSION_05) { pr_err("Create Key feature unsupported: qsee ver %u\n", qseecom.qsee_version); + mutex_unlock(&app_access_lock); return -EINVAL; } data->released = true; - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_create_key(data, argp); if (ret) @@ -8060,19 +8090,21 @@ long qseecom_ioctl(struct file *file, case QSEECOM_IOCTL_WIPE_KEY_REQ: { if 
(!(qseecom.support_pfe || qseecom.support_fde)) pr_err("Features requiring key init not supported\n"); + mutex_lock(&app_access_lock); if (data->type != QSEECOM_GENERIC) { pr_err("wipe key req: invalid handle (%d)\n", data->type); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } if (qseecom.qsee_version < QSEE_VERSION_05) { pr_err("Wipe Key feature unsupported in qsee ver %u\n", qseecom.qsee_version); + mutex_unlock(&app_access_lock); return -EINVAL; } data->released = true; - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_wipe_key(data, argp); if (ret) @@ -8084,19 +8116,21 @@ long qseecom_ioctl(struct file *file, case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: { if (!(qseecom.support_pfe || qseecom.support_fde)) pr_err("Features requiring key init not supported\n"); + mutex_lock(&app_access_lock); if (data->type != QSEECOM_GENERIC) { pr_err("update key req: invalid handle (%d)\n", data->type); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } if (qseecom.qsee_version < QSEE_VERSION_05) { pr_err("Update Key feature unsupported in qsee ver %u\n", qseecom.qsee_version); + mutex_unlock(&app_access_lock); return -EINVAL; } data->released = true; - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_update_key_user_info(data, argp); if (ret) @@ -8106,14 +8140,15 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: { + mutex_lock(&app_access_lock); if (data->type != QSEECOM_GENERIC) { pr_err("save part hash req: invalid handle (%d)\n", data->type); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } data->released = true; - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_save_partition_hash(argp); atomic_dec(&data->ioctl_count); @@ -8121,14 +8156,15 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: { + mutex_lock(&app_access_lock); if (data->type != QSEECOM_GENERIC) { pr_err("ES activated 
req: invalid handle (%d)\n", data->type); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } data->released = true; - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_is_es_activated(argp); atomic_dec(&data->ioctl_count); @@ -8136,14 +8172,15 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: { + mutex_lock(&app_access_lock); if (data->type != QSEECOM_GENERIC) { pr_err("MDTP cipher DIP req: invalid handle (%d)\n", data->type); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } data->released = true; - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_mdtp_cipher_dip(argp); atomic_dec(&data->ioctl_count); @@ -8152,14 +8189,15 @@ long qseecom_ioctl(struct file *file, } case QSEECOM_IOCTL_SEND_MODFD_RESP: case QSEECOM_IOCTL_SEND_MODFD_RESP_64: { + mutex_lock(&listener_access_lock); if ((data->listener.id == 0) || (data->type != QSEECOM_LISTENER_SERVICE)) { pr_err("receive req: invalid handle (%d), lid(%d)\n", data->type, data->listener.id); + mutex_unlock(&listener_access_lock); ret = -EINVAL; break; } - mutex_lock(&listener_access_lock); atomic_inc(&data->ioctl_count); if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP) ret = qseecom_send_modfd_resp(data, argp); @@ -8174,20 +8212,22 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: { + /* Only one client allowed here at a time */ + mutex_lock(&app_access_lock); if ((data->client.app_id == 0) || (data->type != QSEECOM_CLIENT_APP)) { pr_err("Open session: invalid handle (%d) appid(%d)\n", data->type, data->client.app_id); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } if (qseecom.qsee_version < QSEE_VERSION_40) { pr_err("GP feature unsupported: qsee ver %u\n", qseecom.qsee_version); + mutex_unlock(&app_access_lock); return -EINVAL; } - /* Only one client allowed here at a time */ - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = 
qseecom_qteec_open_session(data, argp); atomic_dec(&data->ioctl_count); @@ -8199,20 +8239,22 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: { + /* Only one client allowed here at a time */ + mutex_lock(&app_access_lock); if ((data->client.app_id == 0) || (data->type != QSEECOM_CLIENT_APP)) { pr_err("Close session: invalid handle (%d) appid(%d)\n", data->type, data->client.app_id); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } if (qseecom.qsee_version < QSEE_VERSION_40) { pr_err("GP feature unsupported: qsee ver %u\n", qseecom.qsee_version); + mutex_unlock(&app_access_lock); return -EINVAL; } - /* Only one client allowed here at a time */ - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_qteec_close_session(data, argp); atomic_dec(&data->ioctl_count); @@ -8223,20 +8265,22 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: { + /* Only one client allowed here at a time */ + mutex_lock(&app_access_lock); if ((data->client.app_id == 0) || (data->type != QSEECOM_CLIENT_APP)) { pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n", data->type, data->client.app_id); + mutex_unlock(&app_access_lock); ret = -EINVAL; break; } if (qseecom.qsee_version < QSEE_VERSION_40) { pr_err("GP feature unsupported: qsee ver %u\n", qseecom.qsee_version); + mutex_unlock(&app_access_lock); return -EINVAL; } - /* Only one client allowed here at a time */ - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_qteec_invoke_modfd_cmd(data, argp); atomic_dec(&data->ioctl_count); @@ -8248,20 +8292,22 @@ long qseecom_ioctl(struct file *file, break; } case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: { + /* Only one client allowed here at a time */ + mutex_lock(&app_access_lock); if ((data->client.app_id == 0) || (data->type != QSEECOM_CLIENT_APP)) { pr_err("Cancel req: invalid handle (%d) appid(%d)\n", data->type, data->client.app_id); + 
mutex_unlock(&app_access_lock); ret = -EINVAL; break; } if (qseecom.qsee_version < QSEE_VERSION_40) { pr_err("GP feature unsupported: qsee ver %u\n", qseecom.qsee_version); + mutex_unlock(&app_access_lock); return -EINVAL; } - /* Only one client allowed here at a time */ - mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_qteec_request_cancellation(data, argp); atomic_dec(&data->ioctl_count);