diff --git a/qcom/opensource/mm-drivers/Android.bp b/qcom/opensource/mm-drivers/Android.bp new file mode 100644 index 0000000000..753cce932b --- /dev/null +++ b/qcom/opensource/mm-drivers/Android.bp @@ -0,0 +1,36 @@ +headers_src = [ + "sync_fence/include/uapi/*/**/*.h", +] + +mm_drivers_headers_out = [ + "sync_fence/qcom_sync_file.h", +] + +mm_drivers_kernel_headers_verbose = "--verbose " +genrule { + name: "qti_generate_mm_drivers_kernel_headers", + tools: [ + "headers_install.sh", + "unifdef" + ], + tool_files: [ + "mm_drivers_kernel_headers.py", + ], + srcs: headers_src, + cmd: "python3 $(location mm_drivers_kernel_headers.py) " + + mm_drivers_kernel_headers_verbose + + "--header_arch arm64 " + + "--gen_dir $(genDir) " + + "--mm_drivers_include_uapi $(locations sync_fence/include/uapi/*/**/*.h) " + + "--unifdef $(location unifdef) " + + "--headers_install $(location headers_install.sh)", + out: mm_drivers_headers_out, +} + +cc_library_headers { + name: "qti_mm_drivers_kernel_headers", + generated_headers: ["qti_generate_mm_drivers_kernel_headers"], + export_generated_headers: ["qti_generate_mm_drivers_kernel_headers"], + vendor: true, + recovery_available: true +} diff --git a/qcom/opensource/mm-drivers/Android.mk b/qcom/opensource/mm-drivers/Android.mk new file mode 100644 index 0000000000..86e3104278 --- /dev/null +++ b/qcom/opensource/mm-drivers/Android.mk @@ -0,0 +1,16 @@ +MM_DRIVER_PATH := $(call my-dir) + +MM_DRV_DLKM_ENABLE := true +ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true) + ifeq ($(TARGET_KERNEL_DLKM_MM_DRV_OVERRIDE), false) + MM_DRV_DLKM_ENABLE := false + endif +endif + +ifeq ($(MM_DRV_DLKM_ENABLE), true) + include $(MM_DRIVER_PATH)/msm_ext_display/Android.mk + ifneq ($(TARGET_BOARD_PLATFORM), taro) + include $(MM_DRIVER_PATH)/hw_fence/Android.mk + include $(MM_DRIVER_PATH)/sync_fence/Android.mk + endif +endif diff --git a/qcom/opensource/mm-drivers/BUILD.bazel b/qcom/opensource/mm-drivers/BUILD.bazel new file mode 100644 index 0000000000..fb6cad061f --- /dev/null +++ b/qcom/opensource/mm-drivers/BUILD.bazel @@ -0,0 +1,24 @@ +load("//build/kernel/kleaf:kernel.bzl", "ddk_headers") + +package( + default_visibility = [ + "//visibility:public", + ], +) + +ddk_headers( + name = "mm_drivers_configs", + hdrs = glob(["config/*.h"]), + includes = ["config"], +) + +ddk_headers( + name = "mm_drivers_headers", + hdrs = [ + ":mm_drivers_configs", + "//vendor/qcom/opensource/mm-drivers/hw_fence:hw_fence_headers", + "//vendor/qcom/opensource/mm-drivers/sync_fence:sync_fence_uapi_headers", + "//vendor/qcom/opensource/mm-drivers/msm_ext_display:msm_ext_display_headers", + "//vendor/qcom/opensource/mm-drivers/sync_fence:sync_fence_headers", + ], +) diff --git a/qcom/opensource/mm-drivers/config/kalamammdrivers.conf b/qcom/opensource/mm-drivers/config/kalamammdrivers.conf new file mode 100644 index 0000000000..4e657d38be --- /dev/null +++ b/qcom/opensource/mm-drivers/config/kalamammdrivers.conf @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. +# Copyright (c) 2020, The Linux Foundation. All rights reserved. 
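This config fragment and the kalamammdriversconf.h header that follows carry the same three symbols: the .conf exports them to make/Kbuild, while the header is force-included through LINUXINCLUDE in the hw_fence Kbuild, so the C preprocessor sees matching CONFIG_* macros even though they never enter the kernel's own .config. A minimal, illustrative sketch of a source file keying off one of these symbols (nothing below is part of this patch):

/* Illustrative sketch only -- not part of this patch. With
 * kalamammdriversconf.h force-included via LINUXINCLUDE, plain
 * preprocessor guards on CONFIG_QTI_HW_FENCE / CONFIG_QCOM_SPEC_SYNC
 * work in out-of-tree sources. */
#include <linux/module.h>
#include <linux/printk.h>

static int __init mm_drivers_cfg_demo_init(void)
{
#ifdef CONFIG_QTI_HW_FENCE
	pr_info("hw-fence support compiled in\n");
#endif
#ifdef CONFIG_QCOM_SPEC_SYNC
	pr_info("speculative sync support compiled in\n");
#endif
	return 0;
}
module_init(mm_drivers_cfg_demo_init);

MODULE_DESCRIPTION("Demo of force-included mm-drivers config macros");
MODULE_LICENSE("GPL");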
+ +export CONFIG_MSM_EXT_DISPLAY=y +export CONFIG_QCOM_SPEC_SYNC=y +export CONFIG_QTI_HW_FENCE=y diff --git a/qcom/opensource/mm-drivers/config/kalamammdriversconf.h b/qcom/opensource/mm-drivers/config/kalamammdriversconf.h new file mode 100644 index 0000000000..b9cb331bda --- /dev/null +++ b/qcom/opensource/mm-drivers/config/kalamammdriversconf.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#define CONFIG_MSM_EXT_DISPLAY 1 +#define CONFIG_QCOM_SPEC_SYNC 1 +#define CONFIG_QTI_HW_FENCE 1 diff --git a/qcom/opensource/mm-drivers/hw_fence/Android.mk b/qcom/opensource/mm-drivers/hw_fence/Android.mk new file mode 100644 index 0000000000..149702d2d7 --- /dev/null +++ b/qcom/opensource/mm-drivers/hw_fence/Android.mk @@ -0,0 +1,42 @@ +LOCAL_PATH := $(call my-dir) +LOCAL_MODULE_DDK_BUILD := true +include $(CLEAR_VARS) + +# This makefile is only for DLKM +ifneq ($(findstring vendor,$(LOCAL_PATH)),) + +ifneq ($(findstring opensource,$(LOCAL_PATH)),) + MSM_HW_FENCE_BLD_DIR := $(TOP)/vendor/qcom/opensource/mm-drivers/hw_fence +endif # opensource + +DLKM_DIR := $(TOP)/device/qcom/common/dlkm + +LOCAL_ADDITIONAL_DEPENDENCIES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) + +########################################################### +# This is set once per LOCAL_PATH, not per (kernel) module +KBUILD_OPTIONS := MSM_HW_FENCE_ROOT=$(MSM_HW_FENCE_BLD_DIR) +KBUILD_OPTIONS += MODNAME=msm_hw_fence +KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM) + +########################################################### +include $(CLEAR_VARS) +LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) +LOCAL_MODULE := hw-fence-module-symvers +LOCAL_MODULE_STEM := Module.symvers +LOCAL_MODULE_KBUILD_NAME := Module.symvers +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) + +include $(DLKM_DIR)/Build_external_kernelmodule.mk +########################################################### +include $(CLEAR_VARS) +LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) +LOCAL_MODULE := msm_hw_fence.ko +LOCAL_MODULE_KBUILD_NAME := msm_hw_fence.ko +LOCAL_MODULE_TAGS := optional +LOCAL_MODULE_DEBUG_ENABLE := true +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) + +include $(DLKM_DIR)/Build_external_kernelmodule.mk +########################################################### +endif # DLKM check diff --git a/qcom/opensource/mm-drivers/hw_fence/BUILD.bazel b/qcom/opensource/mm-drivers/hw_fence/BUILD.bazel new file mode 100644 index 0000000000..808c0ec9d3 --- /dev/null +++ b/qcom/opensource/mm-drivers/hw_fence/BUILD.bazel @@ -0,0 +1,16 @@ +load("//build/kernel/kleaf:kernel.bzl", "ddk_headers") +load(":define_hw_fence.bzl", "define_hw_fence") + +package( + default_visibility = [ + "//visibility:public" + ], +) + +ddk_headers( + name = "hw_fence_headers", + hdrs = glob(["include/*.h"]), + includes = ["include"] +) + +define_hw_fence() diff --git a/qcom/opensource/mm-drivers/hw_fence/Kbuild b/qcom/opensource/mm-drivers/hw_fence/Kbuild new file mode 100644 index 0000000000..2cf74d291b --- /dev/null +++ b/qcom/opensource/mm-drivers/hw_fence/Kbuild @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: GPL-2.0-only + +KDIR := $(TOP)/kernel_platform/msm-kernel +include $(MSM_HW_FENCE_ROOT)/config/kalamammdrivers.conf +LINUXINCLUDE += -include $(MSM_HW_FENCE_ROOT)/config/kalamammdriversconf.h \ + 
-I$(MSM_HW_FENCE_ROOT)hw_fence/include/ + +ifdef CONFIG_QTI_HW_FENCE +obj-m += msm_hw_fence.o + +msm_hw_fence-y := src/msm_hw_fence.o \ + src/hw_fence_drv_priv.o \ + src/hw_fence_drv_utils.o \ + src/hw_fence_drv_debug.o \ + src/hw_fence_drv_ipc.o + +msm_hw_fence-$(CONFIG_DEBUG_FS) += src/hw_fence_ioctl.o + +CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" +endif +EXTRA_CFLAGS += -Wformat-extra-args -Wstrict-prototypes -Wformat-insufficient-args \ + -Wformat-invalid-specifier -Wformat-zero-length -Wnonnull \ No newline at end of file diff --git a/qcom/opensource/mm-drivers/hw_fence/Kconfig b/qcom/opensource/mm-drivers/hw_fence/Kconfig new file mode 100644 index 0000000000..bcf3546845 --- /dev/null +++ b/qcom/opensource/mm-drivers/hw_fence/Kconfig @@ -0,0 +1,22 @@ +config QTI_HW_FENCE + bool "HW Fence" + help + Enable the hw_fence module + +config QTI_HW_FENCE_USE_SYNX + bool "HW Fence uses synx" + help + Enable the hw_fence module through synx api. + This will enable hw-fence module to register + hw-fence ops with synx module to support hw- + fencing through synx api and inter-op + functionality between synx and hw-fence. + +config QTI_ENABLE_HW_FENCE_DEFAULT + bool "HW Fence is enabled by default" + help + Enable the hw_fence module by default. + This config allow hw-fence client registrations + by default without any fastboot commands. + HW-fencing can still be disabled and reenabled + at runtime through fastboot commands. diff --git a/qcom/opensource/mm-drivers/hw_fence/Makefile b/qcom/opensource/mm-drivers/hw_fence/Makefile new file mode 100644 index 0000000000..ac6afd73be --- /dev/null +++ b/qcom/opensource/mm-drivers/hw_fence/Makefile @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0-only +KBUILD_OPTIONS += MSM_HW_FENCE_ROOT=$(KERNEL_SRC)/$(M)/../ + +all: modules + +modules_install: + $(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install + +%: + $(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS) + +clean: + rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers + rm -rf .tmp_versions diff --git a/qcom/opensource/mm-drivers/hw_fence/defconfig b/qcom/opensource/mm-drivers/hw_fence/defconfig new file mode 100644 index 0000000000..b39eb5efbe --- /dev/null +++ b/qcom/opensource/mm-drivers/hw_fence/defconfig @@ -0,0 +1,2 @@ +CONFIG_QTI_HW_FENCE=y +CONFIG_QTI_HW_FENCE_USE_SYNX=y diff --git a/qcom/opensource/mm-drivers/hw_fence/define_hw_fence.bzl b/qcom/opensource/mm-drivers/hw_fence/define_hw_fence.bzl new file mode 100644 index 0000000000..849bc62d13 --- /dev/null +++ b/qcom/opensource/mm-drivers/hw_fence/define_hw_fence.bzl @@ -0,0 +1,56 @@ +load("//build/kernel/kleaf:kernel.bzl", "ddk_module", "ddk_submodule") +load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir") +load("//msm-kernel:target_variants.bzl", "get_all_variants") + +def _define_module(target, variant): + tv = "{}_{}".format(target, variant) + if target in [ "pineapple" ]: + target_config = "defconfig" + else: + target_config = "{}_defconfig".format(target) + + ddk_module( + name = "{}_msm_hw_fence".format(tv), + srcs = [ + "src/hw_fence_drv_debug.c", + "src/hw_fence_drv_ipc.c", + "src/hw_fence_drv_priv.c", + "src/hw_fence_drv_utils.c", + "src/msm_hw_fence.c", + ], + out = "msm_hw_fence.ko", + defconfig = target_config, + kconfig = "Kconfig", + conditional_srcs = { + "CONFIG_DEBUG_FS": { + True: ["src/hw_fence_ioctl.c"], + }, + "CONFIG_QTI_HW_FENCE_USE_SYNX" : { + True: [ + "src/msm_hw_fence_synx_translation.c", + "src/hw_fence_drv_interop.c", + ] + }, + }, + deps = [ 
+ "//msm-kernel:all_headers", + "//vendor/qcom/opensource/synx-kernel:synx_headers", + "//vendor/qcom/opensource/mm-drivers:mm_drivers_headers", + ], + kernel_build = "//msm-kernel:{}".format(tv), + ) + + copy_to_dist_dir( + name = "{}_msm_hw_fence_dist".format(tv), + data = [":{}_msm_hw_fence".format(tv)], + dist_dir = "out/target/product/{}/dlkm/lib/modules".format(target), + flat = True, + wipe_dist_dir = False, + allow_duplicate_filenames = False, + mode_overrides = {"**/*": "644"}, + log = "info", + ) + +def define_hw_fence(): + for (t, v) in get_all_variants(): + _define_module(t, v) diff --git a/qcom/opensource/mm-drivers/hw_fence/include/hw_fence_drv_debug.h b/qcom/opensource/mm-drivers/hw_fence/include/hw_fence_drv_debug.h new file mode 100644 index 0000000000..6f447d1ff0 --- /dev/null +++ b/qcom/opensource/mm-drivers/hw_fence/include/hw_fence_drv_debug.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __HW_FENCE_DRV_DEBUG +#define __HW_FENCE_DRV_DEBUG + +#include "hw_fence_drv_ipc.h" + +enum hw_fence_drv_prio { + HW_FENCE_HIGH = 0x000001, /* High density debug messages (noisy) */ + HW_FENCE_LOW = 0x000002, /* Low density debug messages */ + HW_FENCE_INFO = 0x000004, /* Informational prints */ + HW_FENCE_INIT = 0x00008, /* Initialization logs */ + HW_FENCE_QUEUE = 0x000010, /* Queue logs */ + HW_FENCE_LUT = 0x000020, /* Look-up and algorithm logs */ + HW_FENCE_IRQ = 0x000040, /* Interrupt-related messages */ + HW_FENCE_LOCK = 0x000080, /* Lock-related messages */ + HW_FENCE_SSR = 0x0000100, /* SSR-related messages */ + HW_FENCE_PRINTK = 0x010000, +}; + +extern u32 msm_hw_fence_debug_level; + +#define dprintk(__level, __fmt, ...) \ + do { \ + if (msm_hw_fence_debug_level & __level) \ + if (msm_hw_fence_debug_level & HW_FENCE_PRINTK) \ + pr_err(__fmt, ##__VA_ARGS__); \ + } while (0) + + +#define HWFNC_ERR(fmt, ...) \ + pr_err("[hwfence_error:%s:%d][%pS] "fmt, __func__, __LINE__, \ + __builtin_return_address(0), ##__VA_ARGS__) + +#define HWFNC_ERR_ONCE(fmt, ...) \ + pr_err_once("[hwfence_error:%s:%d][%pS] "fmt, __func__, __LINE__, \ + __builtin_return_address(0), ##__VA_ARGS__) + +#define HWFNC_DBG_H(fmt, ...) \ + dprintk(HW_FENCE_HIGH, "[hwfence_dbgh:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__) + +#define HWFNC_DBG_L(fmt, ...) \ + dprintk(HW_FENCE_LOW, "[hwfence_dbgl:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__) + +#define HWFNC_DBG_INFO(fmt, ...) \ + dprintk(HW_FENCE_INFO, "[hwfence_dbgi:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__) + +#define HWFNC_DBG_INIT(fmt, ...) \ + dprintk(HW_FENCE_INIT, "[hwfence_dbg:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__) + +#define HWFNC_DBG_Q(fmt, ...) \ + dprintk(HW_FENCE_QUEUE, "[hwfence_dbgq:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__) + +#define HWFNC_DBG_LUT(fmt, ...) \ + dprintk(HW_FENCE_LUT, "[hwfence_dbglut:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__) + +#define HWFNC_DBG_IRQ(fmt, ...) \ + dprintk(HW_FENCE_IRQ, "[hwfence_dbgirq:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__) + +#define HWFNC_DBG_LOCK(fmt, ...) \ + dprintk(HW_FENCE_LOCK, "[hwfence_dbglock:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__) + +#define HWFNC_DBG_SSR(fmt, ...) \ + dprintk(HW_FENCE_SSR, "[hwfence_dbgssr:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__) + +#define HWFNC_DBG_DUMP(prio, fmt, ...) \ + dprintk(prio, "[hwfence_dbgd:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__) + +#define HWFNC_WARN(fmt, ...) 
\ + pr_warn("[hwfence_warn:%s:%d][%pS] "fmt, __func__, __LINE__, \ + __builtin_return_address(0), ##__VA_ARGS__) + +int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data); +void hw_fence_debug_dump_fence(enum hw_fence_drv_prio prio, struct msm_hw_fence *hw_fence, u64 hash, + u32 count); + +#if IS_ENABLED(CONFIG_DEBUG_FS) + +int process_validation_client_loopback(struct hw_fence_driver_data *drv_data, int client_id); +int hw_fence_debug_wait_val(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, u64 hash, u64 mask, + u64 timeout_ms, u32 *error); + +void hw_fence_debug_dump_queues(struct hw_fence_driver_data *drv_data, enum hw_fence_drv_prio prio, + struct msm_hw_fence_client *hw_fence_client); +void hw_fence_debug_dump_table(enum hw_fence_drv_prio prio, struct hw_fence_driver_data *drv_data); +void hw_fence_debug_dump_events(enum hw_fence_drv_prio prio, struct hw_fence_driver_data *drv_data); + +extern const struct file_operations hw_sync_debugfs_fops; + +struct hw_fence_out_clients_map { + int ipc_client_id_vid; /* ipc client virtual id for the hw fence client */ + int ipc_client_id_pid; /* ipc client physical id for the hw fence client */ + int ipc_signal_id; /* ipc signal id for the hw fence client */ +}; + +/* These signals are the ones that the actual clients should be triggering, hw-fence driver + * does not need to have knowledge of these signals. Adding them here for debugging purposes. + * Only fence controller and the cliens know these id's, since these + * are to trigger the ipcc from the 'client hw-core' to the 'hw-fence controller' + * The index of this struct must match the enum hw_fence_client_id + */ +static const struct hw_fence_out_clients_map + dbg_out_clients_signal_map_no_dpu[HW_FENCE_CLIENT_ID_VAL6 + 1] = { + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 0}, /* CTRL_LOOPBACK */ + {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0}, /* CTX0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 2}, /* CTL0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 4}, /* CTL1 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 6}, /* CTL2 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 8}, /* CTL3 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 10}, /* CTL4 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 12}, /* CTL5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 21}, /* VAL0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 22}, /* VAL1 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 23}, /* VAL2 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 24}, /* VAL3 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25}, /* VAL4 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26}, /* VAL5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27}, /* VAL6 */ +}; +#endif /* CONFIG_DEBUG_FS */ + +#endif /* __HW_FENCE_DRV_DEBUG */ diff --git a/qcom/opensource/mm-drivers/hw_fence/include/hw_fence_drv_fence.h b/qcom/opensource/mm-drivers/hw_fence/include/hw_fence_drv_fence.h new file mode 100644 index 0000000000..1a6689883d --- /dev/null +++ b/qcom/opensource/mm-drivers/hw_fence/include/hw_fence_drv_fence.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 
2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __HW_FENCE_DRV_HW_DMA_FENCE +#define __HW_FENCE_DRV_HW_DMA_FENCE + +#define HW_FENCE_NAME_SIZE 64 + +/** + * struct hw_dma_fence - fences internally created by hw-fence driver. + * @base: base dma-fence structure, this must remain at beginning of the struct. + * @name: name of each fence. + * @client_handle: handle for the client owner of this fence, this is returned by the hw-fence + * driver after a successful registration of the client and used by this fence + * during release. + * @data: internal data to process the fence ops. + * @dma_fence_key: key for the dma-fence hash table. + * @is_internal: true if this fence is initialized internally by hw-fence driver, false otherwise + * @signal_cb: drv_data, hash, and signal_cb of hw_fence + * @node: node for fences held in the dma-fences hash table linked lists + */ +struct hw_dma_fence { + struct dma_fence base; + char name[HW_FENCE_NAME_SIZE]; + void *client_handle; + u32 dma_fence_key; + bool is_internal; + struct hw_fence_signal_cb signal_cb; + struct hlist_node node; +}; + +static inline struct hw_dma_fence *to_hw_dma_fence(struct dma_fence *fence) +{ + return container_of(fence, struct hw_dma_fence, base); +} + +static const char *hw_fence_dbg_get_driver_name(struct dma_fence *fence) +{ + struct hw_dma_fence *hw_dma_fence = to_hw_dma_fence(fence); + + return hw_dma_fence->name; +} + +static const char *hw_fence_dbg_get_timeline_name(struct dma_fence *fence) +{ + struct hw_dma_fence *hw_dma_fence = to_hw_dma_fence(fence); + + return hw_dma_fence->name; +} + +static bool hw_fence_dbg_enable_signaling(struct dma_fence *fence) +{ + return true; +} + +static void _hw_fence_release(struct hw_dma_fence *hw_dma_fence) +{ + int ret = 0; + + if (IS_ERR_OR_NULL(hw_dma_fence->client_handle) || (hw_dma_fence->is_internal && + IS_ERR_OR_NULL(hw_dma_fence->signal_cb.drv_data))) { + HWFNC_ERR("invalid hwfence data %pK %pK, won't release hw_fence!\n", + hw_dma_fence->client_handle, hw_dma_fence->signal_cb.drv_data); + return; + } + + /* release hw-fence */ + if (hw_dma_fence->is_internal) /* internally owned hw_dma_fence has its own refcount */ + ret = hw_fence_destroy_refcount(hw_dma_fence->signal_cb.drv_data, + hw_dma_fence->signal_cb.hash, HW_FENCE_DMA_FENCE_REFCOUNT); + else /* externally owned hw_dma_fence uses standard hlos refcount */ + ret = msm_hw_fence_destroy(hw_dma_fence->client_handle, &hw_dma_fence->base); + + if (ret) + HWFNC_ERR("failed to release hw_fence!\n"); +} + +static void hw_fence_dbg_release(struct dma_fence *fence) +{ + struct hw_dma_fence *hw_dma_fence; + + if (!fence) + return; + + HWFNC_DBG_H("release backing fence %pK\n", fence); + hw_dma_fence = to_hw_dma_fence(fence); + + if (test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) + _hw_fence_release(hw_dma_fence); + + kfree(fence->lock); + kfree(hw_dma_fence); +} + +static struct dma_fence_ops hw_fence_dbg_ops = { + .get_driver_name = hw_fence_dbg_get_driver_name, + .get_timeline_name = hw_fence_dbg_get_timeline_name, + .enable_signaling = hw_fence_dbg_enable_signaling, + .wait = dma_fence_default_wait, + .release = hw_fence_dbg_release, +}; + +static inline bool dma_fence_is_hw_dma(struct dma_fence *fence) +{ + return fence->ops == &hw_fence_dbg_ops; +} + +#endif /* __HW_FENCE_DRV_HW_DMA_FENCE */ diff --git a/qcom/opensource/mm-drivers/hw_fence/include/hw_fence_drv_interop.h b/qcom/opensource/mm-drivers/hw_fence/include/hw_fence_drv_interop.h new file mode 100644 index 
0000000000..a56dfbc89e --- /dev/null +++ b/qcom/opensource/mm-drivers/hw_fence/include/hw_fence_drv_interop.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __HW_FENCE_INTEROP_H +#define __HW_FENCE_INTEROP_H + +#include + +extern struct hw_fence_driver_data *hw_fence_drv_data; +extern struct synx_hwfence_interops synx_interops; + +/** + * HW_FENCE_HANDLE_INDEX_MASK: Mask to extract table index from hw-fence handle + */ +#define HW_FENCE_HANDLE_INDEX_MASK GENMASK(16, 0) + +/** + * hw_fence_interop_to_synx_status() - Converts hw-fence status code to synx status code + * + * @param code : hw-fence status code + * @return synx status code corresponding to hw-fence status code + */ +int hw_fence_interop_to_synx_status(int hw_fence_status_code); + +/** + * hw_fence_interop_to_synx_signal_status() - Converts hw-fence flags and error to + * synx signaling status + * + * @param flags : hw-fence flags + * @param error : hw-fence error + * + * @return synx signaling status + */ +u32 hw_fence_interop_to_synx_signal_status(u32 flags, u32 error); + +/** + * hw_fence_interop_to_hw_fence_error() - Convert synx signaling status to hw-fence error + * + * @param status : synx signaling status + * @return hw-fence error + */ +u32 hw_fence_interop_to_hw_fence_error(u32 status); + +/** + * hw_fence_interop_create_fence_from_import() - Creates hw-fence if necessary during synx_import, + * e.g. if there is no backing hw-fence for a synx fence. + * + * @param params : pointer to import params + * @return SYNX_SUCCESS upon success, -SYNX_INVALID if failed + */ +int hw_fence_interop_create_fence_from_import(struct synx_import_indv_params *params); + +/** + * hw_fence_interop_share_handle_status() - updates HW fence table with synx handle + * (if not already signaled) and return hw-fence handle by populating params.new_h_synx + * and returning signal status + * + * @param params : pointer to import params + * @param h_synx : synx handle + * @param signal_status: signalin status of fence + * + * @return SYNX_SUCCESS upon success, -SYNX_INVALID if failed + */ +int hw_fence_interop_share_handle_status(struct synx_import_indv_params *params, u32 h_synx, + u32 *signal_status); + +/** + * hw_fence_interop_get_fence() – return the dma-fence associated with the given handle + * + * @param h_synx : hw-fence handle + * + * @return dma-fence associated with hw-fence handle. Null or error pointer in case of error. + */ +void *hw_fence_interop_get_fence(u32 h_synx); + +#endif /* __HW_FENCE_INTEROP_H */ diff --git a/qcom/opensource/mm-drivers/hw_fence/include/hw_fence_drv_ipc.h b/qcom/opensource/mm-drivers/hw_fence/include/hw_fence_drv_ipc.h new file mode 100644 index 0000000000..f31135d1bb --- /dev/null +++ b/qcom/opensource/mm-drivers/hw_fence/include/hw_fence_drv_ipc.h @@ -0,0 +1,223 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
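HW_FENCE_HANDLE_INDEX_MASK in the interop header above keeps the global-table index in the low 17 bits of a hw-fence handle. A hedged sketch of how such a mask is typically consumed; the helper name is invented for illustration and is not part of this patch:

/* Illustrative only: extracting the table index carried in the low bits
 * of a hw-fence handle, using the mask declared in hw_fence_drv_interop.h.
 * The helper name is hypothetical. */
#include <linux/bits.h>
#include <linux/types.h>

#define HW_FENCE_HANDLE_INDEX_MASK GENMASK(16, 0)

static inline u32 hw_fence_handle_to_index(u32 handle)
{
	return handle & HW_FENCE_HANDLE_INDEX_MASK;
}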
+ */ + +#ifndef __HW_FENCE_DRV_IPC_H +#define __HW_FENCE_DRV_IPC_H + +/* ipc clients virtual client-id */ +#define HW_FENCE_IPC_CLIENT_ID_APPS_VID 8 +#define HW_FENCE_IPC_CLIENT_ID_GPU_VID 9 +#define HW_FENCE_IPC_CLIENT_ID_IPE_VID 11 +#define HW_FENCE_IPC_CLIENT_ID_VPU_VID 12 +#define HW_FENCE_IPC_CLIENT_ID_DPU_VID 25 +#define HW_FENCE_IPC_CLIENT_ID_IPA_VID 26 +#define HW_FENCE_IPC_CLIENT_ID_SOCCP_VID 46 +#define HW_FENCE_IPC_CLIENT_ID_IFE0_VID 128 +#define HW_FENCE_IPC_CLIENT_ID_IFE1_VID 129 +#define HW_FENCE_IPC_CLIENT_ID_IFE2_VID 130 +#define HW_FENCE_IPC_CLIENT_ID_IFE3_VID 131 +#define HW_FENCE_IPC_CLIENT_ID_IFE4_VID 132 +#define HW_FENCE_IPC_CLIENT_ID_IFE5_VID 133 +#define HW_FENCE_IPC_CLIENT_ID_IFE6_VID 134 +#define HW_FENCE_IPC_CLIENT_ID_IFE7_VID 135 +#define HW_FENCE_IPC_CLIENT_ID_IFE8_VID 136 +#define HW_FENCE_IPC_CLIENT_ID_IFE9_VID 137 +#define HW_FENCE_IPC_CLIENT_ID_IFE10_VID 138 +#define HW_FENCE_IPC_CLIENT_ID_IFE11_VID 139 + +/* ipc clients physical client-id */ +#define HW_FENCE_IPC_CLIENT_ID_APPS_PID 3 +#define HW_FENCE_IPC_CLIENT_ID_GPU_PID 4 +#define HW_FENCE_IPC_CLIENT_ID_IPE_PID 5 +#define HW_FENCE_IPC_CLIENT_ID_VPU_PID 8 +#define HW_FENCE_IPC_CLIENT_ID_DPU_PID 9 +#define HW_FENCE_IPC_CLIENT_ID_IFE0_PID 11 +#define HW_FENCE_IPC_CLIENT_ID_IFE1_PID 12 +#define HW_FENCE_IPC_CLIENT_ID_IFE2_PID 13 +#define HW_FENCE_IPC_CLIENT_ID_IFE3_PID 14 +#define HW_FENCE_IPC_CLIENT_ID_IFE4_PID 15 +#define HW_FENCE_IPC_CLIENT_ID_IFE5_PID 16 +#define HW_FENCE_IPC_CLIENT_ID_IFE6_PID 17 +#define HW_FENCE_IPC_CLIENT_ID_IFE7_PID 18 +#define HW_FENCE_IPC_CLIENT_ID_SOCCP_PID 22 + +/* ipc clients physical client-id on other targets */ +#define HW_FENCE_IPC_CLIENT_ID_IPE_PID_SUN 9 +#define HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN 20 +#define HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE 2 +#define HW_FENCE_IPC_CLIENT_ID_DPU_PID_NIOBE 3 +#define HW_FENCE_IPC_CLIENT_ID_IPA_PID_NIOBE 4 +#define HW_FENCE_IPC_CLIENT_ID_GPU_PID_NIOBE 8 +#define HW_FENCE_IPC_CLIENT_ID_IPE_PID_NIOBE 10 +#define HW_FENCE_IPC_CLIENT_ID_VPU_PID_NIOBE 11 +#define HW_FENCE_IPC_CLIENT_ID_SOCCP_PID_NIOBE 13 +#define HW_FENCE_IPC_CLIENT_ID_IFE0_PID_NIOBE 15 +#define HW_FENCE_IPC_CLIENT_ID_IFE1_PID_NIOBE 16 +#define HW_FENCE_IPC_CLIENT_ID_IFE2_PID_NIOBE 17 +#define HW_FENCE_IPC_CLIENT_ID_IFE3_PID_NIOBE 18 +#define HW_FENCE_IPC_CLIENT_ID_IFE4_PID_NIOBE 19 +#define HW_FENCE_IPC_CLIENT_ID_IFE5_PID_NIOBE 20 +#define HW_FENCE_IPC_CLIENT_ID_IFE6_PID_NIOBE 21 +#define HW_FENCE_IPC_CLIENT_ID_IFE7_PID_NIOBE 22 +#define HW_FENCE_IPC_CLIENT_ID_IFE8_PID_NIOBE 23 +#define HW_FENCE_IPC_CLIENT_ID_IFE9_PID_NIOBE 24 +#define HW_FENCE_IPC_CLIENT_ID_IFE10_PID_NIOBE 25 +#define HW_FENCE_IPC_CLIENT_ID_IFE11_PID_NIOBE 26 + +#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_KALAMA 2 +#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_PINEAPPLE 2 +#define HW_FENCE_IPC_FENCE_PROTOCOL_ID_PINEAPPLE 4 +#define HW_FENCE_IPC_FENCE_PROTOCOL_ID_SUN 4 +#define HW_FENCE_IPC_FENCE_PROTOCOL_ID_NIOBE 4 + +#define HW_FENCE_IPCC_HW_REV_170 0x00010700 /* Kalama */ +#define HW_FENCE_IPCC_HW_REV_203 0x00020003 /* Pineapple */ +#define HW_FENCE_IPCC_HW_REV_2A2 0x00020A02 /* Sun */ +#define HW_FENCE_IPCC_HW_REV_2B4 0x00020B04 /* Niobe */ + +#define IPC_PROTOCOLp_CLIENTc_VERSION(base, p, c) (base + (0x40000*p) + (0x1000*c)) +#define IPC_PROTOCOLp_CLIENTc_CONFIG(base, p, c) (base + 0x8 + (0x40000*p) + (0x1000*c)) +#define IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_ENABLE(base, p, c) \ + (base + 0x14 + (0x40000*p) + (0x1000*c)) +#define IPC_PROTOCOLp_CLIENTc_SEND(base, p, c) ((base + 0xc) + (0x40000*p) + 
(0x1000*c)) +#define IPC_PROTOCOLp_CLIENTc_RECV_ID(base, p, c) (base + 0x10 + (0x40000*p) + (0x1000*c)) +#define IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_CLEAR(base, p, c) (base + 0x1C + (0x40000*p) + (0x1000*c)) +#define HW_FENCE_IPC_RECV_ID_NONE 0xFFFFFFFF + +/** + * hw_fence_ipcc_trigger_signal() - Trigger ipc signal for the requested client/signal pair. + * @drv_data: driver data. + * @tx_client_id: ipc client id that sends the ipc signal. + * @rx_client_id: ipc client id that receives the ipc signal. + * @signal_id: signal id to send. + * + * This API triggers the ipc 'signal_id' from the 'tx_client_id' to the 'rx_client_id' + */ +void hw_fence_ipcc_trigger_signal(struct hw_fence_driver_data *drv_data, + u32 tx_client_id, u32 rx_client_id, u32 signal_id); + +/** + * hw_fence_ipcc_enable_signaling() - Enable ipcc signaling for hw-fence driver. + * @drv_data: driver data. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data); + +/** + * hw_fence_ipcc_enable_protocol() - Enable ipcc protocol used for hw-fencing + * (either compute l1 or fence depending on target) for given client. + * @drv_data: driver data + * @client_id: hw fence driver client id + * + * This should only be called once for each IPCC client, e.g. if protocol is enabled + * for one dpu client, it should not be called again for another dpu client. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int hw_fence_ipcc_enable_protocol(struct hw_fence_driver_data *drv_data, u32 client_id); + +/** + * hw_fence_ipcc_enable_client_signal_pairs() - Enable ipcc signaling for all client-signal + * pairs required for hw-fencing for given client. + * @drv_data: driver data. + * @start_client: first hw fence driver client id for given ipcc client + * + * This API enables input signal from driver and fctl (if fctl is separate from driver) for + * given client. IPCC protocol must be enabled via hw_fence_ipcc_enable_protocol() prior + * to this call. This API iterates through driver's ipc client table to ensure all client- + * signal pairs for given client are enabled. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int hw_fence_ipcc_enable_client_signal_pairs(struct hw_fence_driver_data *drv_data, + u32 start_client); + +/** + * hw_fence_ipcc_get_client_virt_id() - Returns the ipc client virtual id that corresponds to the + * hw fence driver client. + * @drv_data: driver data. + * @client_id: hw fence driver client id. + * + * The ipc client id returned by this API is used by the hw fence driver when signaling the fence. + * + * Return: client_id on success or negative errno (-EINVAL) + */ +int hw_fence_ipcc_get_client_virt_id(struct hw_fence_driver_data *drv_data, u32 client_id); + +/** + * hw_fence_ipcc_get_client_phys_id() - Returns the ipc client physical id that corresponds to the + * hw fence driver client. + * @drv_data: driver data. + * @client_id: hw fence driver client id. + * + * The ipc client id returned by this API is used by the hw fence driver when signaling the fence. + * + * Return: client_id on success or negative errno (-EINVAL) + */ +int hw_fence_ipcc_get_client_phys_id(struct hw_fence_driver_data *drv_data, u32 client_id); + +/** + * hw_fence_ipcc_get_signal_id() - Returns the ipc signal id that corresponds to the hw fence + * driver client. + * @drv_data: driver data. + * @client_id: hw fence driver client id. + * + * The ipc signal id returned by this API is used by the hw fence driver when signaling the fence. 
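The IPC_PROTOCOLp_CLIENTc_* macros earlier in this header step through the IPCC register map: each protocol block is 0x40000 apart, each client block 0x1000, with the SEND register at offset 0xc. A small sketch of addressing that register from a mapped IPCC region; the function, base pointer, and written value are placeholders, not taken from this patch:

/* Illustrative only: composing an IPCC SEND register address with the
 * offset macro from hw_fence_drv_ipc.h. demo_ipcc_send(), its arguments,
 * and the written value are placeholders. */
#include <linux/io.h>
#include <linux/types.h>

#define IPC_PROTOCOLp_CLIENTc_SEND(base, p, c) \
	((base + 0xc) + (0x40000 * (p)) + (0x1000 * (c)))

static void demo_ipcc_send(void __iomem *ipcc_io_mem, u32 protocol_id,
			   u32 rx_client_id, u32 value)
{
	void __iomem *send_reg =
		IPC_PROTOCOLp_CLIENTc_SEND(ipcc_io_mem, protocol_id, rx_client_id);

	writel(value, send_reg); /* value encoding is target-specific */
}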
+ * + * Return: client_id on success or negative errno (-EINVAL) + */ +int hw_fence_ipcc_get_signal_id(struct hw_fence_driver_data *drv_data, u32 client_id); + +/** + * hw_fence_ipcc_needs_rxq_update() - Returns bool to indicate if client uses rx-queue. + * @drv_data: driver data. + * @client_id: hw fence driver client id. + * + * Return: true if client needs to update rxq, false otherwise + */ +bool hw_fence_ipcc_needs_rxq_update(struct hw_fence_driver_data *drv_data, int client_id); + +/** + * hw_fence_ipcc_signaled_needs_rxq_update() - Returns bool to indicate if client requires + * rx-queue update when registering to wait on an already signaled fence. + * @drv_data: driver data. + * @client_id: hw fence driver client id. + * + * Return: true if client needs to update rxq when dma-fence is signaled, false otherwise + */ +bool hw_fence_ipcc_signaled_needs_rxq_update(struct hw_fence_driver_data *drv_data, + int client_id); + +/** + * hw_fence_ipcc_signaled_needs_ipc_irq() - Returns bool to indicate if client needs ipc interrupt + * for already signaled fences + * @drv_data: driver data. + * @client_id: hw fence driver client id. + * + * Return: true if client needs ipc interrupt for signaled fences, false otherwise + */ +bool hw_fence_ipcc_signaled_needs_ipc_irq(struct hw_fence_driver_data *drv_data, int client_id); + +/** + * hw_fence_ipcc_txq_update_needs_ipc_irq() - Returns bool to indicate if client needs ipc interrupt + * when updating client tx queue in hlos + * @drv_data: driver data. + * @client_id: hw fence driver client id. + * + * Return: true if client needs ipc interrupt when updating client tx queue, false otherwise + */ +bool hw_fence_ipcc_txq_update_needs_ipc_irq(struct hw_fence_driver_data *drv_data, int client_id); + +/** + * hw_fence_ipcc_get_signaled_clients_mask() - Returns mask to indicate signals for which clients + * were received by HW Fence Driver + * @drv_data: driver_data + * + * Return: mask on success or zero upon error + */ +u64 hw_fence_ipcc_get_signaled_clients_mask(struct hw_fence_driver_data *drv_data); + +#endif /* __HW_FENCE_DRV_IPC_H */ diff --git a/qcom/opensource/mm-drivers/hw_fence/include/hw_fence_drv_priv.h b/qcom/opensource/mm-drivers/hw_fence/include/hw_fence_drv_priv.h new file mode 100644 index 0000000000..2097675895 --- /dev/null +++ b/qcom/opensource/mm-drivers/hw_fence/include/hw_fence_drv_priv.h @@ -0,0 +1,701 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __HW_FENCE_DRV_INTERNAL_H +#define __HW_FENCE_DRV_INTERNAL_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "msm_hw_fence.h" + +/* max u64 to indicate invalid fence */ +#define HW_FENCE_INVALID_PARENT_FENCE (~0ULL) + +/* hash algorithm constants */ +#define HW_FENCE_HASH_A_MULT 4969 /* a multiplier for Hash algorithm */ +#define HW_FENCE_HASH_C_MULT 907 /* c multiplier for Hash algorithm */ + +/* number of queues per type (i.e. ctrl or client queues) */ +#define HW_FENCE_CTRL_QUEUES 2 /* Rx and Tx Queues */ +#define HW_FENCE_CLIENT_QUEUES 2 /* Rx and Tx Queues */ + +/* hfi headers calculation */ +#define HW_FENCE_HFI_TABLE_HEADER_SIZE(has_soccp) \ + ((has_soccp) ? (sizeof(struct msm_hw_fence_hfi_queue_table_header_v2)) : \ + (sizeof(struct msm_hw_fence_hfi_queue_table_header))) + +#define HW_FENCE_HFI_QUEUE_HEADER_SIZE(has_soccp) \ + ((has_soccp) ? 
(sizeof(struct msm_hw_fence_hfi_queue_header_v2)) : \ + (sizeof(struct msm_hw_fence_hfi_queue_header))) + +#define HW_FENCE_HFI_CTRL_HEADERS_SIZE(has_soccp) (HW_FENCE_HFI_TABLE_HEADER_SIZE(has_soccp) + \ + (HW_FENCE_HFI_QUEUE_HEADER_SIZE(has_soccp) * HW_FENCE_CTRL_QUEUES)) + +#define HW_FENCE_HFI_CLIENT_HEADERS_SIZE(queues_num, has_soccp) \ + (HW_FENCE_HFI_TABLE_HEADER_SIZE(has_soccp) + \ + (HW_FENCE_HFI_QUEUE_HEADER_SIZE(has_soccp) * queues_num)) + +/* + * CTRL queue uses same 64-byte aligned payload size as client queue. + */ +#define HW_FENCE_CTRL_QUEUE_MAX_PAYLOAD_SIZE (sizeof(struct msm_hw_fence_queue_payload)) + +#define HW_FENCE_CTRL_QUEUE_PAYLOAD HW_FENCE_CTRL_QUEUE_MAX_PAYLOAD_SIZE +#define HW_FENCE_CLIENT_QUEUE_PAYLOAD (sizeof(struct msm_hw_fence_queue_payload)) +#define HW_FENCE_CTRL_QUEUE_ENTRIES 64 + +/* + * On targets with SOCCP, client RxQ lock is 64-bit in size but each lock is at a separate 64-byte + * chunk of memory + */ +#define HW_FENCE_LOCK_IDX_OFFSET 8 + +/* Locks area for all clients with RxQ */ +#define HW_FENCE_MEM_LOCKS_SIZE(rxq_clients_num) (HW_FENCE_LOCK_IDX_OFFSET * sizeof(u64) * \ + rxq_clients_num) + +#define HW_FENCE_TX_QUEUE 1 +#define HW_FENCE_RX_QUEUE 2 + +/* ClientID for the internal join fence, this is used by the framework when creating a join-fence */ +#define HW_FENCE_JOIN_FENCE_CLIENT_ID (~(u32)0) + +/** + * msm hw fence flags: + * MSM_HW_FENCE_FLAG_SIGNAL - Flag set when the hw-fence is signaled + * MSM_HW_FENCE_FLAG_CREATE_SIGNALED - Flag set when the hw-fence is created to back a signaled + * dma-fence whose hw-fence has been destroyed + * MSM_HW_FENCE_FLAG_INTERNAL_OWNED - Flag set when HLOS Native fence is internally owned and + * present in dma-fence table + */ +#define MSM_HW_FENCE_FLAG_SIGNAL BIT(0) +#define MSM_HW_FENCE_FLAG_CREATE_SIGNALED BIT(1) +#define MSM_HW_FENCE_FLAG_INTERNAL_OWNED BIT(2) + +/** + * MSM_HW_FENCE_MAX_JOIN_PARENTS: + * Maximum number of parents that a fence can have for a join-fence + */ +#define MSM_HW_FENCE_MAX_JOIN_PARENTS 3 + +/** + * HW_FENCE_PAYLOAD_REV: + * Payload version with major and minor version information + */ +#define HW_FENCE_PAYLOAD_REV(major, minor) (major << 8 | (minor & 0xFF)) + +/** + * HW_FENCE_EVENT_MAX_DATA: + * Maximum data that can be added to the debug event + */ +#define HW_FENCE_EVENT_MAX_DATA 12 + +/** + * HW_FENCE_FCTL_REFCOUNT: + * Refcount held by Fence Controller for signaling. + * This bit in hw_fence->refcount is set during creation of a hw-fence and released when the + * hw-fence is signaled by Fence Controller. + */ +#define HW_FENCE_FCTL_REFCOUNT BIT(31) + +/** + * HW_FENCE_DMA_FENCE_REFCOUNT: + * Refcount held by HW Fence Driver for dma-fence release or signal. + * For dma-fences internally owned by the HW Fence Driver, this is set during hw-fence creation and + * cleared during dma_fence_release. + * For external dma-fences initialized by the client, this is set when the hw-fence signal callback + * is added to the dma-fence and cleared during dma_fence_signal. + */ +#define HW_FENCE_DMA_FENCE_REFCOUNT BIT(30) + +/** + * HW_FENCE_HLOS_REFCOUNT_MASK: + * Mask for refcounts acquired and released from HLOS. + * The field "hw_fence->refcount & HW_FENCE_HLOS_REFCOUNT_MASK" stores the number of refcounts held + * by HW Fence clients or HW Fence Driver. 
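The refcount description here packs three things into one 32-bit field: a Fence Controller signaling bit (bit 31), a dma-fence bit (bit 30), and a 30-bit HLOS count. A hedged sketch of unpacking that field; the struct and helper below are invented for illustration and not part of this patch:

/* Illustrative only: decoding the packed hw_fence->refcount field using
 * the bit layout documented in hw_fence_drv_priv.h. */
#include <linux/bits.h>
#include <linux/types.h>

#define HW_FENCE_FCTL_REFCOUNT       BIT(31)
#define HW_FENCE_DMA_FENCE_REFCOUNT  BIT(30)
#define HW_FENCE_HLOS_REFCOUNT_MASK  GENMASK(29, 0)

struct refcount_view {
	bool fctl_held;       /* fence controller still owns its signaling ref */
	bool dma_fence_held;  /* hw-fence driver holds a ref for the dma-fence */
	u32 hlos_refs;        /* refs held by HLOS clients/driver */
};

static inline struct refcount_view hw_fence_decode_refcount(u32 refcount)
{
	return (struct refcount_view){
		.fctl_held = !!(refcount & HW_FENCE_FCTL_REFCOUNT),
		.dma_fence_held = !!(refcount & HW_FENCE_DMA_FENCE_REFCOUNT),
		.hlos_refs = refcount & HW_FENCE_HLOS_REFCOUNT_MASK,
	};
}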
+ */ +#define HW_FENCE_HLOS_REFCOUNT_MASK GENMASK(29, 0) + +/* + * DMA_FENCE_HASH_TABLE_BIT: Bit that define the size of the dma-fences hash table + * DMA_FENCE_HASH_TABLE_SIZE: Size of dma-fences hash table + */ +#define DMA_FENCE_HASH_TABLE_BIT (12) /* size of table = (1 << 12) = 4096 */ +#define DMA_FENCE_HASH_TABLE_SIZE (1 << DMA_FENCE_HASH_TABLE_BIT) + +/** + * enum hw_fence_client_data_id - Enum with the clients having client_data, an optional + * parameter passed from the waiting client and returned + * to it upon fence signaling. + * @HW_FENCE_CLIENT_DATA_ID_CTX0: GFX Client 0. + * @HW_FENCE_MAX_CLIENTS_WITH_DATA: Max number of clients with data, also indicates an + * invalid hw_fence_client_data_id + */ +enum hw_fence_client_data_id { + HW_FENCE_CLIENT_DATA_ID_CTX0, + HW_FENCE_MAX_CLIENTS_WITH_DATA, +}; + +/** + * struct msm_hw_fence_queue - Structure holding the data of the hw fence queues. + * @va_queue: pointer to the virtual address of the queue elements + * @q_size_bytes: size of the queue + * @va_header: pointer to the hfi header virtual address + * @pa_queue: physical address of the queue + * @rd_wr_idx_start: start read and write indexes for client queue (zero by default) + * @rd_wr_idx_factor: factor to multiply custom index to get index in dwords (one by default) + * @skip_wr_idx: bool to indicate if update to write_index is skipped within hw fence driver and + * hfi_header->tx_wm is updated instead + */ +struct msm_hw_fence_queue { + void *va_queue; + u32 q_size_bytes; + void *va_header; + phys_addr_t pa_queue; + u32 rd_wr_idx_start; + u32 rd_wr_idx_factor; + bool skip_wr_idx; +}; + +/** + * enum payload_type - Enum with the queue payload types. + * HW_FENCE_PAYLOAD_TYPE_1: client queue payload + * HW_FENCE_PAYLOAD_TYPE_2: ctrl queue payload for fence error; client_data stores client_id + * HW_FENCE_PAYLOAD_TYPE_3: ctrl queue payload for memory sharing + * HW_FENCE_PAYLOAD_TYPE_4: ctrl queue payload for soccp ssr + */ +enum payload_type { + HW_FENCE_PAYLOAD_TYPE_1 = 1, + HW_FENCE_PAYLOAD_TYPE_2, + HW_FENCE_PAYLOAD_TYPE_3, + HW_FENCE_PAYLOAD_TYPE_4 +}; + +/** + * struct msm_hw_fence_client - Structure holding the per-Client allocated resources. + * @client_id: internal client_id used within HW fence driver; index into the clients struct + * @client_id_ext: external client_id, equal to client_id except for clients with configurable + * number of sub-clients (e.g. ife clients) + * @mem_descriptor: hfi header memory descriptor + * @queues: queues descriptor + * @queues_num: number of client queues + * @fence_error_cb: function called for waiting clients that need HLOS notification of fence error + * @fence_error_cb_userdata: opaque pointer registered with fence error callback and passed to + * client during invocation of callback function + * @error_cb_lock: lock to synchronize access to fence error cb and fence error cb data + * @ipc_signal_id: id of the signal to be triggered for this client + * @ipc_client_vid: virtual id of the ipc client for this hw fence driver client + * @ipc_client_pid: physical id of the ipc client for this hw fence driver client + * @update_rxq: bool to indicate if client requires rx queue update in general signal case + * (e.g. 
if dma-fence is signaled) + * @signaled_update_rxq: bool to indicate if client requires rx queue update when registering to + * wait on an already signaled fence + * @signaled_send_ipc: bool to indicate if client requires ipc interrupt for already signaled fences + * @txq_update_send_ipc: bool to indicate if client requires ipc interrupt for txq updates + * @skip_fctl_ref: bool to indicate if client-created fences should not have fctl refcount during + * initial creation; this refcount is instead set during synx_import call + * @context_id: context id for fences created internally + * @seqno: sequence no for fences created internally + * @wait_queue: wait queue for the validation clients + * @val_signal: doorbell flag to signal the validation clients in the wait queue + * @kref: number of active references to this client + */ +struct msm_hw_fence_client { + enum hw_fence_client_id client_id; + enum hw_fence_client_id client_id_ext; + struct msm_hw_fence_mem_addr mem_descriptor; + struct msm_hw_fence_queue queues[HW_FENCE_CLIENT_QUEUES]; + int queues_num; + msm_hw_fence_error_cb_t fence_error_cb; + void *fence_error_cb_userdata; + struct mutex error_cb_lock; + int ipc_signal_id; + int ipc_client_vid; + int ipc_client_pid; + bool update_rxq; + bool signaled_update_rxq; + bool signaled_send_ipc; + bool txq_update_send_ipc; + bool skip_fctl_ref; + u64 context_id; + atomic_t seqno; + struct kref kref; +#if IS_ENABLED(CONFIG_DEBUG_FS) + wait_queue_head_t wait_queue; + atomic_t val_signal; +#endif /* CONFIG_DEBUG_FS */ +}; + +/** + * struct msm_hw_fence_mem_data - Structure holding internal memory attributes + * + * @attrs: attributes for the memory allocation + */ +struct msm_hw_fence_mem_data { + unsigned long attrs; +}; + +/** + * struct msm_hw_fence_dbg_data - Structure holding debugfs data + * + * @root: debugfs root + * @entry_rd: flag to indicate if debugfs dumps a single line or table + * @context_rd: debugfs setting to indicate which context id to dump + * @seqno_rd: debugfs setting to indicate which seqno to dump + * @client_id_rd: debugfs setting to indicate which client queue(s) to dump + * @hw_fence_sim_release_delay: delay in micro seconds for the debugfs node that simulates the + * hw-fences behavior, to release the hw-fences + * @create_hw_fences: boolean to continuosly create hw-fences within debugfs + * @clients_list: list of debug clients registered + * @clients_list_lock: lock to synchronize access to the clients list + * @lock_wake_cnt: number of times that driver triggers wake-up ipcc to unlock inter-vm try-lock + */ +struct msm_hw_fence_dbg_data { + struct dentry *root; + + bool entry_rd; + u64 context_rd; + u64 seqno_rd; + u32 client_id_rd; + + u32 hw_fence_sim_release_delay; + bool create_hw_fences; + + struct list_head clients_list; + struct mutex clients_list_lock; + + u64 lock_wake_cnt; +}; + +/** + * struct hw_fence_client_type_desc - Structure holding client type properties, including static + * properties and client queue properties read from device-tree. + * + * @name: name of client type, used to parse properties from device-tree + * @init_id: initial client_id for given client type within the 'hw_fence_client_id' enum, e.g. 
+ * HW_FENCE_CLIENT_ID_CTL0 for DPU clients + * @max_clients_num: maximum number of clients of given client type + * @clients_num: number of clients of given client type + * @queues_num: number of queues per client of given client type; either one (for only Tx Queue) or + * two (for both Tx and Rx Queues) + * @queue_entries: number of entries per client queue of given client type + * @start_padding: size of padding between queue table header and first queue header in bytes + * @end_padding: size of padding between queue header(s) and first queue payload in bytes + * @mem_size: size of memory allocated for client queue(s) per client in bytes + * @txq_idx_start: start read and write indexes for client tx queue (zero by default) + * @txq_idx_factor: factor to multiply custom TxQ idx to get index in dwords (one by default) + * @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence + * driver and hfi_header->tx_wm is updated instead + * @skip_fctl_ref: bool to indicate if client-created fences should not have fctl refcount during + * initial creation; this refcount is instead set during synx_import call + */ +struct hw_fence_client_type_desc { + char *name; + enum hw_fence_client_id init_id; + u32 max_clients_num; + u32 clients_num; + u32 queues_num; + u32 queue_entries; + u32 start_padding; + u32 end_padding; + u32 mem_size; + u32 txq_idx_start; + u32 txq_idx_factor; + bool skip_txq_wr_idx; + bool skip_fctl_ref; +}; + +/** + * struct hw_fence_client_queue_desc - Structure holding client queue properties for a client. + * + * @type: pointer to client queue properties of client type + * @start_offset: start offset of client queue memory region, from beginning of carved-out memory + * allocation for hw fence driver + */ +struct hw_fence_client_queue_desc { + struct hw_fence_client_type_desc *type; + u32 start_offset; +}; + +/** + * struct hw_fence_signal_cb - Structure holding hw-fence callback data for dma-fence callback + * + * @fence_cb: fence callback data structure used to add dma_fence_callback + * @drv_data: structure holding internal hw-fence driver data + * @hash: hash of hw-fence to decrement refcount in dma-fence callback + */ +struct hw_fence_signal_cb { + struct dma_fence_cb fence_cb; + struct hw_fence_driver_data *drv_data; + u64 hash; +}; + +/** + * struct hw_fence_soccp - Structure holding hw-fence data specific to soccp + * @rproc_ph: phandle for soccp rproc object used to set power vote + * @rproc: soccp rproc object used to set power vote + * @rproc_lock: lock to synchronization modifications to soccp rproc data structure and state + * @is_awake: true if HW Fence Driver has successfully set a power vote on soccp that has not been + * removed by SSR; false if soccp has not set a power vote, successfully removed its power vote, + * or soccp has crashed + * @usage_cnt: independent counter of number of users of SOCCP, 1 if no one is using + * @ssr_nb: notifier block used for soccp ssr + * @ssr_notifier: soccp ssr notifier + * @ssr_wait_queue: wait queue to notify ssr callback that a payload has been received from soccp + * @ssr_cnt: counts number of times soccp has restarted, zero if initial boot-up + */ +struct hw_fence_soccp { + phandle rproc_ph; + struct rproc *rproc; + struct mutex rproc_lock; + bool is_awake; + refcount_t usage_cnt; + struct notifier_block ssr_nb; + void *ssr_notifier; + wait_queue_head_t ssr_wait_queue; + u32 ssr_cnt; +}; + +/** + * struct hw_fence_driver_data - Structure holding internal hw-fence driver data + * + * 
@dev: device driver pointer + * @resources_ready: value set by driver at end of probe, once all resources are ready + * @hw_fence_table_entries: total number of hw-fences in the global table + * @hw_fence_mem_fences_table_size: hw-fences global table total size + * @hw_fence_queue_entries: total number of entries that can be available in the queue + * @hw_fence_ctrl_queue_size: size of the ctrl queue for the payload + * @hw_fence_mem_ctrl_queues_size: total size of ctrl queues, including: header + rxq + txq + * @hw_fence_client_queue_size: descriptors of client queue properties for each hw fence client + * @hw_fence_client_types: descriptors of properties for each hw fence client type + * @rxq_clients_num: number of supported hw fence clients with rxq (configured based on device-tree) + * @clients_num: number of supported hw fence clients (configured based on device-tree) + * @hw_fences_tbl: pointer to the hw-fences table + * @hw_fences_tbl_cnt: number of elements in the hw-fence table + * @hlos_key_tbl: pointer to table of keys tracked by hlos only, same size as the hw-fences table + * @events: start address of hw fence debug events + * @total_events: total number of hw fence debug events supported + * @client_lock_tbl: pointer to the per-client locks table + * @client_lock_tbl_cnt: number of elements in the locks table + * @hw_fences_mem_desc: memory descriptor for the hw-fence table + * @clients_locks_mem_desc: memory descriptor for the locks table + * @ctrl_queue_mem_desc: memory descriptor for the ctrl queues + * @ctrl_queues: pointer to the ctrl queues + * @io_mem_base: pointer to the carved-out io memory + * @res: resources for the carved out memory + * @size: size of the carved-out memory + * @label: label for the carved-out memory (this is used by SVM to find the memory) + * @peer_name: peer name for this carved-out memory + * @rm_nb: hyp resource manager notifier + * @memparcel: memparcel for the allocated memory + * @used_mem_size: total memory size of global table, lock region, and ctrl and client queues + * @cpu_addr_cookie: bogus cpu address returned by dma_alloc_attrs which is used for freeing memory + * @db_label: doorbell label + * @rx_dbl: handle to the Rx doorbell + * @debugfs_data: debugfs info + * @ipcc_reg_base: base for ipcc regs mapping + * @ipcc_io_mem: base for the ipcc io mem map + * @ipcc_size: size of the ipcc io mem mapping + * @protocol_id: ipcc protocol id used by this driver + * @ipcc_client_vid: ipcc client virtual-id for this driver + * @ipcc_client_pid: ipcc client physical-id for this driver + * @ipcc_fctl_vid: ipcc client virtual-id for fctl + * @ipcc_fctl_pid: ipcc client physical-id for fctl + * @ipc_clients_table: table with the ipcc mapping for each client of this driver + * @qtime_reg_base: qtimer register base address + * @qtime_io_mem: qtimer io mem map + * @qtime_size: qtimer io mem map size + * @client_id_mask: bitmask for tracking registered client_ids + * @clients_register_lock: lock to synchronize clients registration and deregistration + * @clients: table with the handles of the registered clients; size is equal to clients_num + * @fctl_ready: flag to indicate if fence controller has been initialized + * @ipcc_dpu_initialized: flag to indicate if dpu hw is initialized + * @ipcc_val_initialized: flag to indicate if val is initialized + * @dma_fence_table_lock: lock to synchronize access to dma-fence table + * @dma_fence_table: table with internal dma-fences for hw-fences + * @has_soccp: flag to indicate if soccp is present (otherwise vm is 
used) + * @soccp_listener_thread: thread that processes interrupts received from soccp + * @soccp_wait_queue: wait queue to notify soccp_listener_thread of new interrupts + * @signaled_clients_mask: mask to track signals received from soccp by hw-fence driver + * @soccp_props: soccp-specific properties for ssr and power votes + */ +struct hw_fence_driver_data { + + struct device *dev; + bool resources_ready; + + /* Table & Queues info */ + u32 hw_fence_table_entries; + u32 hw_fence_mem_fences_table_size; + u32 hw_fence_queue_entries; + /* ctrl queues */ + u32 hw_fence_ctrl_queue_size; + u32 hw_fence_mem_ctrl_queues_size; + /* client queues */ + struct hw_fence_client_queue_desc *hw_fence_client_queue_size; + struct hw_fence_client_type_desc *hw_fence_client_types; + u32 rxq_clients_num; + u32 clients_num; + + /* HW Fences Table VA */ + struct msm_hw_fence *hw_fences_tbl; + u64 *hlos_key_tbl; + u32 hw_fences_tbl_cnt; + + /* events */ + struct msm_hw_fence_event *events; + u32 total_events; + + /* Table with a Per-Client Lock */ + u64 *client_lock_tbl; + u32 client_lock_tbl_cnt; + + /* Memory Descriptors */ + struct msm_hw_fence_mem_addr hw_fences_mem_desc; + struct msm_hw_fence_mem_addr clients_locks_mem_desc; + struct msm_hw_fence_mem_addr ctrl_queue_mem_desc; + struct msm_hw_fence_queue ctrl_queues[HW_FENCE_CTRL_QUEUES]; + + /* carved out memory */ + void __iomem *io_mem_base; + struct resource res; + size_t size; + u32 label; + u32 peer_name; + struct notifier_block rm_nb; + u32 memparcel; + u32 used_mem_size; + void *cpu_addr_cookie; + + /* doorbell */ + u32 db_label; + + /* VM virq */ + void *rx_dbl; + + /* debugfs */ + struct msm_hw_fence_dbg_data debugfs_data; + + /* ipcc regs */ + phys_addr_t ipcc_reg_base; + void __iomem *ipcc_io_mem; + uint32_t ipcc_size; + u32 protocol_id; + u32 ipcc_client_vid; + u32 ipcc_client_pid; + u32 ipcc_fctl_vid; + u32 ipcc_fctl_pid; + + /* table with mapping of ipc client for each hw-fence client */ + struct hw_fence_client_ipc_map *ipc_clients_table; + + /* qtime reg */ + phys_addr_t qtime_reg_base; + void __iomem *qtime_io_mem; + uint32_t qtime_size; + + /* synchronize client_ids registration and deregistration */ + struct mutex clients_register_lock; + + /* table with registered client handles */ + struct msm_hw_fence_client **clients; + + bool fctl_ready; + /* state variables */ + bool ipcc_dpu_initialized; + +#if IS_ENABLED(CONFIG_DEBUG_FS) + bool ipcc_val_initialized; +#endif /* CONFIG_DEBUG_FS */ + + spinlock_t dma_fence_table_lock; + /* table with internal dma-fences created by the this driver on client's behalf */ + DECLARE_HASHTABLE(dma_fence_table, DMA_FENCE_HASH_TABLE_BIT); + + /* soccp is present */ + bool has_soccp; + struct task_struct *soccp_listener_thread; + wait_queue_head_t soccp_wait_queue; + atomic_t signaled_clients_mask; + struct hw_fence_soccp soccp_props; +}; + +/** + * struct msm_hw_fence_queue_payload - hardware fence clients queues payload. + * @size: size of queue payload + * @type: type of queue payload + * @version: version of queue payload. 
High eight bits are for major and lower eight + * bits are for minor version + * @ctxt_id: context id of the dma fence + * @seqno: sequence number of the dma fence + * @hash: fence hash + * @flags: see MSM_HW_FENCE_FLAG_* flags descriptions + * @client_data: data passed from and returned to waiting client upon fence signaling + * @error: error code for this fence, fence controller receives this + * error from the signaling client through the tx queue and + * propagates the error to the waiting client through rx queue + * @timestamp_lo: low 32-bits of qtime of when the payload is written into the queue + * @timestamp_hi: high 32-bits of qtime of when the payload is written into the queue + */ +struct msm_hw_fence_queue_payload { + u32 size; + u16 type; + u16 version; + u64 ctxt_id; + u64 seqno; + u64 hash; + u64 flags; + u64 client_data; + u32 error; + u32 timestamp_lo; + u32 timestamp_hi; + u32 reserve; +}; + +/** + * struct msm_hw_fence_event - hardware fence ctl debug event + * time: qtime when the event is logged + * cpu: cpu id where the event is logged + * data_cnt: count of valid data available in the data field + * data: debug data logged by the event + */ +struct msm_hw_fence_event { + u64 time; + u32 cpu; + u32 data_cnt; + u32 data[HW_FENCE_EVENT_MAX_DATA]; +}; + +/** + * struct msm_hw_fence - structure holding each hw fence data. + * @valid: field updated when a hw-fence is reserved. True if hw-fence is in use + * @error: field to hold a hw-fence error + * @ctx_id: context id + * @seq_id: sequence id + * @wait_client_mask: bitmask holding the waiting-clients of the fence + * @fence_allocator: field to indicate the client_id that reserved the fence + * @fence_signal_client: client that signaled the fence + * @lock: this field is required to share information between the Driver & Driver || + * Driver & FenceCTL. Needs to be 64-bit atomic inter-processor lock. + * @flags: field to indicate the state of the fence + * @parent_list: list of indexes with the parents for a child-fence in a join-fence + * @parent_cnt: total number of parents for a child-fence in a join-fence + * @pending_child_cnt: children refcount for a parent-fence in a join-fence. Access must be atomic + * or locked + * @fence_create_time: debug info with the create time timestamp + * @fence_trigger_time: debug info with the trigger time timestamp + * @fence_wait_time: debug info with the register-for-wait timestamp + * @refcount: refcount on the hw-fence. This is split into multiple fields, see + * HW_FENCE_HLOS_REFCOUNT_MASK and HW_FENCE_FCTL_REFCOUNT and HW_FENCE_DMA_FENCE_REFCOUNT + * for more detail + * @h_synx: synx handle, nonzero if hw-fence is also backed by synx fence + * @client_data: array of data optionally passed from and returned to clients waiting on the fence + * during fence signaling + */ +struct msm_hw_fence { + u32 valid; + u32 error; + u64 ctx_id; + u64 seq_id; + u64 wait_client_mask; + u32 fence_allocator; + u32 fence_signal_client; + u64 lock; /* Datatype must be 64-bit. 
*/ + u64 flags; + u64 parent_list[MSM_HW_FENCE_MAX_JOIN_PARENTS]; + u32 parents_cnt; + u32 pending_child_cnt; + u64 fence_create_time; + u64 fence_trigger_time; + u64 fence_wait_time; + u32 refcount; + u32 h_synx; + u64 client_data[HW_FENCE_MAX_CLIENTS_WITH_DATA]; +}; + +int hw_fence_init(struct hw_fence_driver_data *drv_data); +int hw_fence_alloc_client_resources(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, + struct msm_hw_fence_mem_addr *mem_descriptor); +int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client); +int hw_fence_init_controller_resources(struct msm_hw_fence_client *hw_fence_client); +void hw_fence_cleanup_client(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client); +void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client); +int hw_fence_create(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 hlos_key, + u64 context, u64 seqno, u64 *hash); +int hw_fence_add_callback(struct hw_fence_driver_data *drv_data, struct dma_fence *fence, u64 hash); +int hw_fence_destroy(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 hlos_key, + u64 context, u64 seqno); +int hw_fence_destroy_with_hash(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 hash); +int hw_fence_destroy_refcount(struct hw_fence_driver_data *drv_data, u64 hash, u32 ref); +int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, + struct dma_fence_array *array, u64 *hash_join_fence, u64 client_data); +int hw_fence_process_fence(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, u64 *hash, + u64 client_data); +int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, u64 seqno, u64 hash, + u64 flags, u64 client_data, u32 error, int queue_type); +int hw_fence_update_queue_helper(struct hw_fence_driver_data *drv_data, u32 client_id, + struct msm_hw_fence_queue *queue, u16 type, u64 ctxt_id, u64 seqno, u64 hash, u64 flags, + u64 client_data, u32 error, int queue_type); +int hw_fence_update_existing_txq_payload(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 hash, u32 error); +inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data); +char *_get_queue_type(int queue_type); +int hw_fence_read_queue(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence_queue_payload *payload, + int queue_type); +int hw_fence_read_queue_helper(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_queue *queue, struct msm_hw_fence_queue_payload *payload); +void hw_fence_get_queue_idx_ptrs(struct hw_fence_driver_data *drv_data, void *va_header, + u32 **rd_idx_ptr, u32 **wr_idx_ptr, u32 **tx_wm_ptr); +int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, + struct dma_fence *fence, struct msm_hw_fence_client *hw_fence_client, u64 context, + u64 seqno, u64 *hash, u64 client_data); +struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 hlos_key, + u64 context, u64 seqno, u64 *hash); +struct msm_hw_fence *hw_fence_find_with_dma_fence(struct 
hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, u64 *hash, + bool *is_signaled, bool create); +enum hw_fence_client_data_id hw_fence_get_client_data_id(enum hw_fence_client_id client_id); +int hw_fence_signal_fence(struct hw_fence_driver_data *drv_data, struct dma_fence *fence, u64 hash, + u32 error, bool release_ref); +int hw_fence_get_flags_error(struct hw_fence_driver_data *drv_data, u64 hash, u64 *flags, + u32 *error); +int hw_fence_update_hsynx(struct hw_fence_driver_data *drv_data, u64 hash, u32 h_synx, + bool wait_for); +int hw_fence_ssr_cleanup_table(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence *hw_fences_tbl, u32 table_total_entries, u64 in_flight_lock); + +/* apis for internally managed dma-fence */ +struct dma_fence *hw_dma_fence_init(struct msm_hw_fence_client *hw_fence_client, u64 context, + u64 seqno); +struct dma_fence *hw_fence_internal_dma_fence_create(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 *hash); +struct dma_fence *hw_fence_dma_fence_find(struct hw_fence_driver_data *drv_data, + u64 hash, bool incr_refcount); + +/* internal checks used by msm_hw_fence and synx_hwfence functions */ +int hw_fence_check_hw_fence_driver(struct hw_fence_driver_data *drv_data); +int hw_fence_check_valid_client(struct hw_fence_driver_data *drv_data, void *client_handle); +int hw_fence_check_valid_fctl(struct hw_fence_driver_data *drv_data, void *client_handle); + +#endif /* __HW_FENCE_DRV_INTERNAL_H */ diff --git a/qcom/opensource/mm-drivers/hw_fence/include/hw_fence_drv_utils.h b/qcom/opensource/mm-drivers/hw_fence/include/hw_fence_drv_utils.h new file mode 100644 index 0000000000..f48db2affb --- /dev/null +++ b/qcom/opensource/mm-drivers/hw_fence/include/hw_fence_drv_utils.h @@ -0,0 +1,206 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __HW_FENCE_DRV_UTILS_H +#define __HW_FENCE_DRV_UTILS_H + +/** + * HW_FENCE_MAX_CLIENT_TYPE_STATIC: + * Total number of client types without configurable number of sub-clients (GFX, DPU, VAL) + */ +#define HW_FENCE_MAX_CLIENT_TYPE_STATIC 3 + +/** + * HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE: + * Maximum number of client types with configurable number of sub-clients (e.g. IPE, VPU, IFE, IPA) + */ +#define HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE 15 + +/** + * HW_FENCE_MAX_STATIC_CLIENTS_INDEX: + * Maximum number of static clients, i.e. clients without configurable numbers of sub-clients + */ +#define HW_FENCE_MAX_STATIC_CLIENTS_INDEX HW_FENCE_CLIENT_ID_IPE + +/** + * enum hw_fence_mem_reserve - Types of reservations for the carved-out memory. + * HW_FENCE_MEM_RESERVE_CTRL_QUEUE: Reserve memory for the ctrl rx/tx queues. + * HW_FENCE_MEM_RESERVE_LOCKS_REGION: Reserve memory for the per-client locks memory region. + * HW_FENCE_MEM_RESERVE_TABLE: Reserve memory for the hw-fences global table. + * HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: Reserve memory per-client for the rx/tx queues. 
+ * HW_FENCE_MEM_RESERVE_EVENTS_BUFF: Reserve memory for the debug events + */ +enum hw_fence_mem_reserve { + HW_FENCE_MEM_RESERVE_CTRL_QUEUE, + HW_FENCE_MEM_RESERVE_LOCKS_REGION, + HW_FENCE_MEM_RESERVE_TABLE, + HW_FENCE_MEM_RESERVE_CLIENT_QUEUE, + HW_FENCE_MEM_RESERVE_EVENTS_BUFF +}; + +/** + * global_atomic_store() - Inter-processor lock + * @drv_data: hw fence driver data + * @lock: memory to lock + * @val: if true, api locks the memory, if false it unlocks the memory + */ +void global_atomic_store(struct hw_fence_driver_data *drv_data, uint64_t *lock, bool val); + +/** + * hw_fence_utils_init_virq() - Initialize doorbell (i.e. vIRQ) for SVM to HLOS signaling + * @drv_data: hw fence driver data + * + * Returns zero if success, otherwise returns negative error code. + */ +int hw_fence_utils_init_virq(struct hw_fence_driver_data *drv_data); + +/** + * hw_fence_utils_init_soccp_irq() - Initialize interrupt handler for SOCCP to HLOS signaling + * @drv_data: hw fence driver data + * + * Returns zero if success, otherwise returns negative error code. + */ +int hw_fence_utils_init_soccp_irq(struct hw_fence_driver_data *drv_data); + +/** + * hw_fence_utils_register_soccp_ssr_notifier() - registers rproc ssr notifier for soccp + * @drv_data: hw fence driver data + * + * Returns zero if success, otherwise returns negative error code. + */ +int hw_fence_utils_register_soccp_ssr_notifier(struct hw_fence_driver_data *drv_data); + +/** + * hw_fence_utils_process_signaled_clients_mask() - Process the mask containing HW Fence client IDs + * that HW Fence Driver is responsible for, i.e. + * ctrl queue and validation clients. + * @drv_data: hw fence driver data. + * @mask: mask with signaled clients + */ +void hw_fence_utils_process_signaled_clients_mask(struct hw_fence_driver_data *drv_data, + u64 mask); + +/** + * hw_fence_utils_alloc_mem() - Allocates the carved-out memory pool that will be used for the HW + * Fence global table, locks and queues. + * @hw_fence_drv_data: hw fence driver data + * + * Returns zero if success, otherwise returns negative error code. + */ +int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *hw_fence_drv_data); + +/** + * hw_fence_utils_reserve_mem() - Reserves memory from the carved-out memory pool. + * @drv_data: hw fence driver data. + * @type: memory reservation type. + * @phys: physical address of the carved-out memory pool + * + * Returns zero if success, otherwise returns negative error code. + */ +int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data, + enum hw_fence_mem_reserve type, phys_addr_t *phys, void **pa, u32 *size, int client_id); + +/** + * hw_fence_utils_parse_dt_props() - Init dt properties + * @drv_data: hw fence driver data + * + * Returns zero if success, otherwise returns negative error code. + */ +int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data); + +/** + * hw_fence_utils_map_ipcc() - Maps IPCC registers and enable signaling + * @drv_data: hw fence driver data + * + * Returns zero if success, otherwise returns negative error code. + */ +int hw_fence_utils_map_ipcc(struct hw_fence_driver_data *drv_data); + +/** + * hw_fence_utils_map_qtime() - Maps qtime register + * @drv_data: hw fence driver data + * + * Returns zero if success, otherwise returns negative error code. 
+ */
+int hw_fence_utils_map_qtime(struct hw_fence_driver_data *drv_data);
+
+/**
+ * hw_fence_utils_cleanup_fence() - Cleanup the hw-fence from a specified client
+ * @drv_data: hw fence driver data
+ * @hw_fence_client: client, for which the fence must be cleared
+ * @hw_fence: hw-fence to cleanup
+ * @hash: hash of the hw-fence to cleanup
+ * @reset_flags: flags to determine how to handle the reset
+ *
+ * Returns zero if success, otherwise returns negative error code.
+ */
+int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data,
+ struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash,
+ u32 reset_flags);
+
+/**
+ * hw_fence_utils_fence_error_cb() - Invokes fence error callback registered by specified client
+ *
+ * @hw_fence_client: client, for which fence error callback must be invoked
+ * @ctxt_id: context id of the hw-fence
+ * @seqno: sequence number of the hw-fence
+ * @hash: hash of the hw-fence
+ * @flags: flags of the hw-fence
+ * @error: error of the hw-fence
+ *
+ * Returns zero if success, otherwise returns negative error code.
+ */
+int hw_fence_utils_fence_error_cb(struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id,
+ u64 seqno, u64 hash, u64 flags, u32 error);
+
+/**
+ * hw_fence_utils_get_client_id_priv() - Gets the index into the clients struct within the
+ * hw fence driver from the client_id used externally
+ *
+ * Performs a 1-to-1 mapping for all client IDs less than HW_FENCE_MAX_STATIC_CLIENTS_INDEX,
+ * otherwise consolidates client IDs of clients with configurable number of sub-clients. Fails if
+ * provided with client IDs for such clients when support for those clients is not configured in
+ * device-tree.
+ *
+ * @drv_data: hw fence driver data
+ * @client_id: external client_id to get internal client_id for
+ *
+ * Returns client_id < drv_data->clients_num if success, otherwise returns HW_FENCE_CLIENT_MAX
+ */
+enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver_data *drv_data,
+ enum hw_fence_client_id client_id);
+
+/**
+ * hw_fence_utils_get_queues_num() - Returns the number of client queues for the client_id.
+ *
+ * @drv_data: driver data
+ * @client_id: hw fence driver client id
+ *
+ * Returns: number of client queues
+ */
+int hw_fence_utils_get_queues_num(struct hw_fence_driver_data *drv_data, int client_id);
+
+/**
+ * hw_fence_utils_get_skip_fctl_ref() - Returns whether the client avoids creating fences with
+ * the fctl refcount initialized.
+ *
+ * @drv_data: driver data
+ * @client_id: hw fence driver client id
+ *
+ * Returns: nonzero if the client skips initializing the fctl refcount, zero otherwise
+ */
+int hw_fence_utils_get_skip_fctl_ref(struct hw_fence_driver_data *drv_data, int client_id);
+
+/**
+ * hw_fence_utils_set_power_vote() - Sets the power vote for soccp.
+ *
+ * @drv_data: driver data
+ * @state: power state to set
+ *
+ * Returns: 0 on success, negative error code otherwise
+ */
+int hw_fence_utils_set_power_vote(struct hw_fence_driver_data *drv_data, bool state);
+#endif /* __HW_FENCE_DRV_UTILS_H */
diff --git a/qcom/opensource/mm-drivers/hw_fence/include/msm_hw_fence.h b/qcom/opensource/mm-drivers/hw_fence/include/msm_hw_fence.h
new file mode 100644
index 0000000000..e39e1b1fdb
--- /dev/null
+++ b/qcom/opensource/mm-drivers/hw_fence/include/msm_hw_fence.h
@@ -0,0 +1,713 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */ + +#ifndef __MSM_HW_FENCE_H +#define __MSM_HW_FENCE_H + +#include +#include + +extern struct hw_fence_driver_data *hw_fence_drv_data; +extern bool hw_fence_driver_enable; + +/** + * MSM_HW_FENCE_FLAG_ENABLED_BIT - Hw-fence is enabled for the dma_fence. + * + * Drivers set this flag in the dma_fence 'flags' to fences that + * are backed up by a hw-fence. + */ +#define MSM_HW_FENCE_FLAG_ENABLED_BIT 31 + +/** + * MSM_HW_FENCE_FLAG_SIGNALED_BIT - Hw-fence is signaled for the dma_fence. + * + * This flag is set by hw-fence driver when a client wants to add itself as + * a waiter for this hw-fence. The client uses this flag to avoid adding itself + * as a waiter for a fence that is already retired. + */ +#define MSM_HW_FENCE_FLAG_SIGNALED_BIT 30 + +/** + * MSM_HW_FENCE_ERROR_RESET - Hw-fence flagged as error due to forced reset from producer. + */ +#define MSM_HW_FENCE_ERROR_RESET BIT(0) + +/** + * MSM_HW_FENCE_RESET_WITHOUT_ERROR: Resets client and its hw-fences, signaling them without error. + * MSM_HW_FENCE_RESET_WITHOUT_DESTROY: Resets client and its hw-fences, signaling without + * destroying the fences. + */ +#define MSM_HW_FENCE_RESET_WITHOUT_ERROR BIT(0) +#define MSM_HW_FENCE_RESET_WITHOUT_DESTROY BIT(1) + +/** + * MSM_HW_FENCE_UPDATE_ERROR_WITH_MOVE: Updates client tx queue error by moving fence with error to + * beginning of queue. + */ +#define MSM_HW_FENCE_UPDATE_ERROR_WITH_MOVE BIT(0) + +/** + * MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - Maximum number of signals per client + */ +#define MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT 64 + +/** + * MSM_HW_FENCE_DBG_DUMP_QUEUES: Dumps queues information + * MSM_HW_FENCE_DBG_DUMP_TABLE: Dumps hwfence table + * MSM_HW_FENCE_DBG_DUMP_EVENTS: Dumps hwfence ctl events + */ +#define MSM_HW_FENCE_DBG_DUMP_QUEUES BIT(0) +#define MSM_HW_FENCE_DBG_DUMP_TABLE BIT(1) +#define MSM_HW_FENCE_DBG_DUMP_EVENTS BIT(2) + +/** + * struct msm_hw_fence_create_params - Creation parameters. + * + * @name : Optional parameter associating a name with the object for debug purposes. + * Only first 64 bytes are accepted, rest will be ignored. + * @handle : Pointer to fence handle (filled by function). + * @fence : Pointer to fence. + * @flags : flags for customization. + */ +struct msm_hw_fence_create_params { + const char *name; + u64 *handle; + void *fence; + u32 flags; +}; + +/** + * struct msm_hw_fence_hfi_queue_table_header - HFI queue table structure. + * @version: HFI protocol version. + * @size: Queue table size in dwords. + * @qhdr0_offset: First queue header offset (dwords) in this table. + * @qhdr_size: Queue header size. + * @num_q: Number of queues defined in this table. + * @num_active_q: Number of active queues. + */ +struct msm_hw_fence_hfi_queue_table_header { + u32 version; + u32 size; + u32 qhdr0_offset; + u32 qhdr_size; + u32 num_q; + u32 num_active_q; +}; + +/** + * struct msm_hw_fence_hfi_queue_table_header_v2 - Version 2 of HFI queue table structure. + * @version: HFI protocol version. + * @size: Queue table size in dwords. + * @qhdr0_offset: First queue header offset (dwords) in this table. + * @qhdr_size: Queue header size. + * @num_q: Number of queues defined in this table. + * @num_active_q: Number of active queues. + * @reserved: reserved memory used for 64-byte alignment + */ +struct msm_hw_fence_hfi_queue_table_header_v2 { + u32 version; + u32 size; + u32 qhdr0_offset; + u32 qhdr_size; + u32 num_q; + u32 num_active_q; + u32 reserved[10]; +}; + +/** + * struct msm_hw_fence_hfi_queue_header - HFI queue header structure. 
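As a minimal illustration (not part of this patch), a client driver could test the MSM_HW_FENCE_FLAG_* bit positions defined earlier in this header directly on the dma_fence flags word. The example_* helper names below are invented for the sketch, which assumes the declarations from this header are visible.

#include <linux/bitops.h>
#include <linux/dma-fence.h>

/* Sketch only: local helpers a client might keep; not part of msm_hw_fence.h. */
static inline bool example_fence_is_hw_backed(struct dma_fence *fence)
{
	/* Producers set this bit on dma-fences that are backed by a hw-fence. */
	return test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags);
}

static inline bool example_fence_already_signaled(struct dma_fence *fence)
{
	/* Set by the hw-fence driver so waiters can skip fences that already retired. */
	return test_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
}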
+ * @status: Active = 1, Inactive = 0. + * @start_addr: Starting address of the queue. + * @type: Queue type (rx/tx). + * @queue_size: Size of the queue. + * @pkt_size: Size of the queue packet entries, + * 0 - means variable size of message in the queue, + * non-zero - size of the packet, fixed. + * @pkt_drop_cnt: Number of packets drop by sender. + * @rx_wm: Receiver watermark, applicable in event driven mode. + * @tx_wm: Sender watermark, applicable in event driven mode. + * @rx_req: Receiver sets this bit if queue is empty. + * @tx_req: Sender sets this bit if queue is full. + * @rx_irq_status: Receiver sets this bit and triggers an interrupt to the + * sender after packets are dequeued. Sender clears this bit. + * @tx_irq_status: Sender sets this bit and triggers an interrupt to the + * receiver after packets are queued. Receiver clears this bit. + * @read_index: read index of the queue. + * @write_index: write index of the queue. + */ +struct msm_hw_fence_hfi_queue_header { + u32 status; + u32 start_addr; + u32 type; + u32 queue_size; + u32 pkt_size; + u32 pkt_drop_cnt; + u32 rx_wm; + u32 tx_wm; + u32 rx_req; + u32 tx_req; + u32 rx_irq_status; + u32 tx_irq_status; + u32 read_index; + u32 write_index; +}; + +/** + * struct msm_hw_fence_hfi_queue_header - HFI queue header structure. + * @status: Active = 1, Inactive = 0. + * @start_addr: Starting address of the queue. + * @type: Queue type (rx/tx). + * @queue_size: Size of the queue. + * @pkt_size: Size of the queue packet entries, + * 0 - means variable size of message in the queue, + * non-zero - size of the packet, fixed. + * @pkt_drop_cnt: Number of packets drop by sender. + * @rx_wm: Receiver watermark, applicable in event driven mode. + * @tx_wm: Sender watermark, applicable in event driven mode. + * @rx_req: Receiver sets this bit if queue is empty. + * @tx_req: Sender sets this bit if queue is full. + * @rx_irq_status: Receiver sets this bit and triggers an interrupt to the + * sender after packets are dequeued. Sender clears this bit. + * @tx_irq_status: Sender sets this bit and triggers an interrupt to the + * receiver after packets are queued. Receiver clears this bit. + * @init_reserved: reservation for 64-byte alignment of read and write indexes + * @read_index: read index of the queue. + * @read_index_reserved: reservation for 64-byte alignment of read and write indexes + * @write_index: write index of the queue. + * @write_index_reserved: reservation for 64-byte alignment of read and write indexes + */ +struct msm_hw_fence_hfi_queue_header_v2 { + u32 status; + u32 start_addr; + u32 type; + u32 queue_size; + u32 pkt_size; + u32 pkt_drop_cnt; + u32 rx_wm; + u32 tx_wm; + u32 rx_req; + u32 tx_req; + u32 rx_irq_status; + u32 tx_irq_status; + u32 init_reserved[4]; + u32 read_index; + u32 read_index_reserved[15]; + u32 write_index; + u32 write_index_reserved[15]; +}; + +/** + * struct msm_hw_fence_mem_addr - Memory descriptor of the queue allocated by + * the fence driver for each client during + * register. + * @virtual_addr: Kernel virtual address of the queue. + * @device_addr: Physical address of the memory object. + * @size: Size of the memory. + * @mem_data: Internal pointer with the attributes of the allocation. + */ +struct msm_hw_fence_mem_addr { + void *virtual_addr; + phys_addr_t device_addr; + u64 size; + void *mem_data; +}; + +/** + * struct msm_hw_fence_cb_data - Data passed back in fence error callback. 
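The _v2 structures above express their documented 64-byte alignment purely through reserved members. A compile-time sketch of that layout (illustrative, not part of this patch; it assumes the structs above are visible and relies on every member being u32, so there is no implicit padding) could look like this:

#include <linux/build_bug.h>
#include <linux/stddef.h>

static inline void example_check_hfi_v2_layout(void)
{
	/* 6 used + 10 reserved u32 members pad the v2 table header to 64 bytes. */
	BUILD_BUG_ON(sizeof(struct msm_hw_fence_hfi_queue_table_header_v2) != 64);

	/* init_reserved[4] places read_index on a 64-byte boundary... */
	BUILD_BUG_ON(offsetof(struct msm_hw_fence_hfi_queue_header_v2, read_index) != 64);

	/* ...and read_index_reserved[15] places write_index on the next one. */
	BUILD_BUG_ON(offsetof(struct msm_hw_fence_hfi_queue_header_v2, write_index) != 128);
}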
+ * @data: data registered with callback + * @fence: fence signaled with error + */ +struct msm_hw_fence_cb_data { + void *data; + struct dma_fence *fence; +}; + +/** + * msm_hw_fence_error_cb: Callback function registered by waiting clients. + * Dispatched when client is waiting on a fence + * signaled with error. + * + * @handle: handle of fence signaled with error + * @error: error signed for fence + * @cb_data: pointer to struct containing opaque pointer registered with callback + * and fence information + */ +typedef void (*msm_hw_fence_error_cb_t)(u32 handle, int error, void *cb_data); + +/** + * enum hw_fence_client_id - Unique identifier of the supported clients. + * @HW_FENCE_CLIENT_ID_CTX0: GFX Client. + * @HW_FENCE_CLIENT_ID_CTL0: DPU Client 0. + * @HW_FENCE_CLIENT_ID_CTL1: DPU Client 1. + * @HW_FENCE_CLIENT_ID_CTL2: DPU Client 2. + * @HW_FENCE_CLIENT_ID_CTL3: DPU Client 3. + * @HW_FENCE_CLIENT_ID_CTL4: DPU Client 4. + * @HW_FENCE_CLIENT_ID_CTL5: DPU Client 5. + * @HW_FENCE_CLIENT_ID_VAL0: debug Validation client 0. + * @HW_FENCE_CLIENT_ID_VAL1: debug Validation client 1. + * @HW_FENCE_CLIENT_ID_VAL2: debug Validation client 2. + * @HW_FENCE_CLIENT_ID_VAL3: debug Validation client 3. + * @HW_FENCE_CLIENT_ID_VAL4: debug Validation client 4. + * @HW_FENCE_CLIENT_ID_VAL5: debug Validation client 5. + * @HW_FENCE_CLIENT_ID_VAL6: debug Validation client 6. + * @HW_FENCE_CLIENT_ID_IPE: IPE Client. + * @HW_FENCE_CLIENT_ID_VPU: VPU Client. + * @HW_FENCE_CLIENT_ID_IFE0: IFE0 Client 0. + * @HW_FENCE_CLIENT_ID_IFE1: IFE1 Client 0. + * @HW_FENCE_CLIENT_ID_IFE2: IFE2 Client 0. + * @HW_FENCE_CLIENT_ID_IFE3: IFE3 Client 0. + * @HW_FENCE_CLIENT_ID_IFE4: IFE4 Client 0. + * @HW_FENCE_CLIENT_ID_IFE5: IFE5 Client 0. + * @HW_FENCE_CLIENT_ID_IFE6: IFE6 Client 0. + * @HW_FENCE_CLIENT_ID_IFE7: IFE7 Client 0. + * @HW_FENCE_CLIENT_MAX: Max number of clients, any client must be added + * before this enum. 
+ */ +enum hw_fence_client_id { + HW_FENCE_CLIENT_ID_CTX0 = 0x1, + HW_FENCE_CLIENT_ID_CTL0, + HW_FENCE_CLIENT_ID_CTL1, + HW_FENCE_CLIENT_ID_CTL2, + HW_FENCE_CLIENT_ID_CTL3, + HW_FENCE_CLIENT_ID_CTL4, + HW_FENCE_CLIENT_ID_CTL5, + HW_FENCE_CLIENT_ID_VAL0, + HW_FENCE_CLIENT_ID_VAL1, + HW_FENCE_CLIENT_ID_VAL2, + HW_FENCE_CLIENT_ID_VAL3, + HW_FENCE_CLIENT_ID_VAL4, + HW_FENCE_CLIENT_ID_VAL5, + HW_FENCE_CLIENT_ID_VAL6, + HW_FENCE_CLIENT_ID_IPE, + HW_FENCE_CLIENT_ID_VPU = HW_FENCE_CLIENT_ID_IPE + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IPA = HW_FENCE_CLIENT_ID_VPU + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE0 = HW_FENCE_CLIENT_ID_IPA + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE1 = HW_FENCE_CLIENT_ID_IFE0 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE2 = HW_FENCE_CLIENT_ID_IFE1 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE3 = HW_FENCE_CLIENT_ID_IFE2 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE4 = HW_FENCE_CLIENT_ID_IFE3 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE5 = HW_FENCE_CLIENT_ID_IFE4 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE6 = HW_FENCE_CLIENT_ID_IFE5 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE7 = HW_FENCE_CLIENT_ID_IFE6 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE8 = HW_FENCE_CLIENT_ID_IFE7 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE9 = HW_FENCE_CLIENT_ID_IFE8 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE10 = HW_FENCE_CLIENT_ID_IFE9 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE11 = HW_FENCE_CLIENT_ID_IFE10 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_MAX = HW_FENCE_CLIENT_ID_IFE11 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT +}; + +#if IS_ENABLED(CONFIG_QTI_HW_FENCE) +/** + * msm_hw_fence_register() - Registers a client with the HW Fence Driver. + * @client_id: ID of the client that is being registered. + * @mem_descriptor: Pointer to fill the memory descriptor. Fence + * controller driver fills this pointer with the + * memory descriptor for the rx/tx queues. + * + * This call initializes any shared memory region for the tables/queues + * required for the HW Fence Driver to communicate with Fence Controller + * for this client_id and fills the memory descriptor for the queues + * that the client hw cores need to manage. + * + * Return: Handle to the client object that must be used for further calls + * to the fence controller driver or NULL in case of error. + * + * The returned handle is used internally by the fence controller driver + * in further calls to identify the client and access any resources + * allocated for this client. + */ +void *msm_hw_fence_register( + enum hw_fence_client_id client_id, + struct msm_hw_fence_mem_addr *mem_descriptor); + +/** + * msm_hw_fence_deregister() - Deregisters a client that was previously + * registered with the HW Fence Driver. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_deregister(void *client_handle); + +/** + * msm_hw_fence_create() - Creates a new hw fence. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @params: Hw fence creation parameters containing dma fence + * to create its associated hw-fence. 
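As a hedged usage sketch (not part of this patch), a producer-side client would typically register once and then back each dma-fence it produces with a hw-fence. The example_* names and the choice of HW_FENCE_CLIENT_ID_CTL0 are assumptions made only for illustration; the snippet assumes the declarations from this header are visible.

#include <linux/dma-fence.h>

/* Sketch: register the producer client once (e.g. at probe) and keep the handle. */
static void *example_register_producer(struct msm_hw_fence_mem_addr *mem_desc)
{
	/* HW_FENCE_CLIENT_ID_CTL0 is only an example; use the ID of the real hw core. */
	return msm_hw_fence_register(HW_FENCE_CLIENT_ID_CTL0, mem_desc);
}

/* Sketch: associate a hw-fence with a dma-fence the producer just created. */
static int example_back_fence_with_hw_fence(void *client, struct dma_fence *fence, u64 *handle)
{
	struct msm_hw_fence_create_params params = {
		.name = "example-producer",	/* optional debug name */
		.handle = handle,		/* filled with the hw-fence handle */
		.fence = fence,			/* dma-fence to associate */
		.flags = 0,
	};

	/*
	 * The producer owns the resulting hw-fence and must release it later with
	 * msm_hw_fence_destroy() or msm_hw_fence_destroy_with_handle() once all
	 * dma-fence references are dropped.
	 */
	return msm_hw_fence_create(client, &params);
}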
+ * + * This call creates the hw fence and registers it with the fence + * controller. After the creation of this fence, it is a Client Driver + * responsibility to 'destroy' this fence to prevent any leakage of + * hw-fence resources. + * To destroy a fence, 'msm_hw_fence_destroy' must be called, once the + * fence is not required anymore, which is when all the references to + * the dma-fence are released. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_create(void *client_handle, + struct msm_hw_fence_create_params *params); + +/** + * msm_hw_fence_destroy() - Destroys a hw fence. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @fence: Sw dma-fence to destroy its associated hw-fence. + * + * The fence destroyed by this function, is a fence that must have been + * created by the hw fence driver through 'msm_hw_fence_create' call. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_destroy(void *client_handle, + struct dma_fence *fence); + +/** + * msm_hw_fence_destroy_with_handle() - Destroys a hw fence through its handle. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @handle: handle for hw-fence to destroy + * + * The fence destroyed by this function, is a fence that must have been + * created by the hw fence driver through 'msm_hw_fence_create' call. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_destroy_with_handle(void *client_handle, u64 handle); + +/** + * msm_hw_fence_wait_update_v2() - Register or unregister the Client with the + * Fence Controller as a waiting-client of the + * list of fences received as parameter. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @fences: Pointer to an array of pointers containing the fences to + * 'wait-on' for this client. If a 'fence-array' fence is passed, + * driver will iterate through the individual 'fences' which are + * part of the 'fence-array' and will register to wait-for-all the + * individual fences of the fence-array. + * A 'fence-array' passed as parameter can only have 'individual' + * fences and cannot have another nested 'fence-array', + * otherwise this API will return failure. + * Also, all the 'fences' in this list must have a corresponding + * hw-fence that was registered by the producer of the fence, + * otherwise, this API will return failure. + * @handles: Optional pointer to an array of handles of 'fences'. + * If non-null, these handles are filled by the function. + * This list must have the same size as 'fences' if present. + * @client_data_list: Optional pointer to an array of u64 client_data + * values for each fence in 'fences'. + * If non-null, this list must have the same size as + * the 'fences' list. This client registers each fence + * with the client_data value at the same index so that + * this value is returned to the client upon signaling + * of the fence. + * If a null pointer is provided, a default value of + * zero is registered as the client_data of each fence. + * @num_fences: Number of elements in the 'fences' list (and 'handles' and + * 'client_data_list' if either or both are present). + * @reg: Boolean to indicate if register or unregister for waiting on + * the hw-fence. 
+ * + * If the 'register' boolean is set as true, this API will register with + * the Fence Controller the Client as a consumer (i.e. 'wait-client') of + * the fences received as parameter. + * Function will return immediately after the client was registered + * (i.e this function does not wait for the fences to be signaled). + * When any of the Fences received as parameter is signaled (or all the + * fences in case of a fence-array), Fence controller will trigger the hw + * signal to notify the Client hw-core about the signaled fence (or fences + * in case of a fence array). i.e. signalization of the hw fence it is a + * hw to hw communication between Fence Controller and the Client hw-core, + * and this API is only the interface to allow the Client Driver to + * register its Client hw-core for the hw-to-hw notification. + * If the 'register' boolean is set as false, this API will unregister + * with the Fence Controller the Client as a consumer, this is used for + * cases where a Timeout waiting for a fence occurs and client drivers want + * to unregister for signal. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_wait_update_v2(void *client_handle, + struct dma_fence **fences, u64 *handles, u64 *client_data_list, u32 num_fences, bool reg); + +/** + * msm_hw_fence_wait_update() - Register or unregister the Client with the + * Fence Controller as a waiting-client of the + * list of fences received as parameter. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @fences: Pointer to an array of pointers containing the fences to + * 'wait-on' for this client. If a 'fence-array' fence is passed, + * driver will iterate through the individual 'fences' which are + * part of the 'fence-array' and will register to wait-for-all the + * individual fences of the fence-array. + * A 'fence-array' passed as parameter can only have 'individual' + * fences and cannot have another nested 'fence-array', + * otherwise this API will return failure. + * Also, all the 'fences' in this list must have a corresponding + * hw-fence that was registered by the producer of the fence, + * otherwise, this API will return failure. + * @num_fences: Number of elements in the 'fences' list. + * @reg: Boolean to indicate if register or unregister for waiting on + * the hw-fence. + * + * If the 'register' boolean is set as true, this API will register with + * the Fence Controller the Client as a consumer (i.e. 'wait-client') of + * the fences received as parameter. + * Function will return immediately after the client was registered + * (i.e this function does not wait for the fences to be signaled). + * When any of the Fences received as parameter is signaled (or all the + * fences in case of a fence-array), Fence controller will trigger the hw + * signal to notify the Client hw-core about the signaled fence (or fences + * in case of a fence array). i.e. signalization of the hw fence it is a + * hw to hw communication between Fence Controller and the Client hw-core, + * and this API is only the interface to allow the Client Driver to + * register its Client hw-core for the hw-to-hw notification. + * If the 'register' boolean is set as false, this API will unregister + * with the Fence Controller the Client as a consumer, this is used for + * cases where a Timeout waiting for a fence occurs and client drivers want + * to unregister for signal. 
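A corresponding waiter-side sketch (again illustrative, not part of this patch) registers the client as a consumer of a producer's fence and drops the registration if the wait later times out. The example_* names are invented; the client handle is assumed to come from msm_hw_fence_register(), and the declarations from this header are assumed to be visible.

#include <linux/dma-fence.h>

/* Sketch: register as a waiter on one producer fence. */
static int example_register_waiter(void *client, struct dma_fence *fence, u64 *handle)
{
	struct dma_fence *fences[] = { fence };
	u64 client_data[] = { 1 };	/* returned to this client when the fence signals */

	/* reg=true: fence controller raises the hw signal once 'fence' is signaled. */
	return msm_hw_fence_wait_update_v2(client, fences, handle, client_data, 1, true);
}

/* Sketch: undo the wait registration, e.g. after a timeout on the client side. */
static int example_cancel_wait(void *client, struct dma_fence *fence)
{
	struct dma_fence *fences[] = { fence };

	return msm_hw_fence_wait_update_v2(client, fences, NULL, NULL, 1, false);
}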
+ * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_wait_update(void *client_handle, + struct dma_fence **fences, u32 num_fences, bool reg); + +/** + * msm_hw_fence_reset_client() - Resets the HW Fence Client. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @reset_flags: Flags to choose the reset type. See MSM_HW_FENCE_RESET_* + * definitions. + * + * This function iterates through the HW Fences and removes the client + * from the waiting-client mask in any of the HW Fences and signal the + * fences owned by that client. + * This function should only be called by clients upon error, when clients + * did a HW reset, to make sure any HW Fence where the client was register + * for wait are removed, and any Fence owned by the client are signaled. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags); + +/** + * msm_hw_fence_reset_client_by_id() - Resets the HW Fence Client through + * its id. + * @client_id: id of client to reset + * @reset_flags: Flags to choose the reset type. See MSM_HW_FENCE_RESET_* + * definitions. + * + * This function iterates through the HW Fences and removes the client + * from the waiting-client mask in any of the HW Fences and signal the + * fences owned by that client. + * This function should only be called by clients upon error, when clients + * did a HW reset, to make sure any HW Fence where the client was register + * for wait are removed, and any Fence owned by the client are signaled. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_reset_client_by_id(enum hw_fence_client_id client_id, u32 reset_flags); + +/** + * msm_hw_fence_update_txq() - Updates Client Tx Queue with the Fence info. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @handle: handle for fence to update in the Tx Queue. + * @flags: flags to set in the queue for the fence. + * @error: error to set in the queue for the fence. + * + * This function should only be used by clients that cannot have the Tx Queue + * updated by the Firmware or the HW Core. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 error); + +/** + * msm_hw_fence_update_txq_error() - Updates error field for fence already in Tx Queue. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @handle: handle for existing fence in Tx Queue to update. + * @error: error to set in the queue for the fence. + * @update_flags: flags to choose the update type. See MSM_HW_FENCE_UPDATE_ERROR_* + * definitions. + * + * This function should only be used by clients that cannot have the Tx Queue + * updated by the Firmware or the HW Core. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_update_txq_error(void *client_handle, u64 handle, u32 error, u32 update_flags); + +/** + * msm_hw_fence_trigger_signal() - Triggers signal for the tx/rx signal pair + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @ tx_client_id: id of the client triggering the signal. 
+ * @ rx_client_id: id of the client receiving the signal. + * @ signal_id: id of the signal to trigger + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_trigger_signal(void *client_handle, u32 tx_client_id, u32 rx_client_id, + u32 signal_id); + +/** + * msm_hw_fence_register_error_cb() - Register callback to be dispatched when + * HW Fence Client is waiting for a fence + * that is signaled with error. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @cb: pointer to callback function to be invoked + * @data: opaque pointer passed back with callback + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_register_error_cb(void *client_handle, msm_hw_fence_error_cb_t cb, void *data); + +/** + * msm_hw_fence_deregister_error_cb() - Deregister callback to be dispatched when + * HW Fence Client is waiting for a fence + * that is signaled with error. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_deregister_error_cb(void *client_handle); + +#else +static inline void *msm_hw_fence_register(enum hw_fence_client_id client_id, + struct msm_hw_fence_mem_addr *mem_descriptor) +{ + return NULL; +} + +static inline int msm_hw_fence_deregister(void *client_handle) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_create(void *client_handle, + struct msm_hw_fence_create_params *params) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_destroy(void *client_handle, struct dma_fence *fence) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_destroy_with_handle(void *client_handle, u64 handle) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_wait_update_v2(void *client_handle, + struct dma_fence **fences, u64 *handles, u64 *client_data_list, u32 num_fences, bool reg) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_wait_update(void *client_handle, + struct dma_fence **fences, u32 num_fences, bool reg) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_reset_client_by_id(enum hw_fence_client_id client_id, + u32 reset_flags) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 error) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_update_txq_error(void *client_handle, u64 handle, u32 error, + u32 update_flags) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_trigger_signal(void *client_handle, u32 tx_client_id, + u32 rx_client_id, u32 signal_id) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_register_error_cb(void *client_handle, msm_hw_fence_error_cb_t cb, + void *data) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_deregister_error_cb(void *client_handle) +{ + return -EINVAL; +} +#endif + +#if IS_ENABLED(CONFIG_DEBUG_FS) && IS_ENABLED(CONFIG_QTI_HW_FENCE) +/** + * msm_hw_fence_dump_debug_data() - Dumps debug data information + * @client_handle: Hw fence driver client handle returned during 'msm_hw_fence_register'. + * @dump_flags: Flags to indicate which info to dump, see MSM_HW_FENCE_DBG_DUMP_** flags. 
+ * @dump_clients_mask: Optional bitmask to indicate along with the caller of the api, which other + * clients to dump data from. E.g. a client like display might want to dump + * info of any all other clients from which it can receive fences, like gfx. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_dump_debug_data(void *client_handle, u32 dump_flags, u32 dump_clients_mask); + +/** + * msm_hw_fence_dump_debug_data() - Dumps hw-fence information for dma-fence + * @client_handle: Hw fence driver client handle returned during 'msm_hw_fence_register'. + * @fence: dma_fence to dump hw-fence information + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_dump_fence(void *client_handle, struct dma_fence *fence); + +#else +static inline int msm_hw_fence_dump_debug_data(void *client_handle, u32 dump_flags, + u32 dump_clients_mask) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_dump_fence(void *client_handle, struct dma_fence *fence) +{ + return -EINVAL; +} +#endif + +#endif diff --git a/qcom/opensource/mm-drivers/hw_fence/niobe_defconfig b/qcom/opensource/mm-drivers/hw_fence/niobe_defconfig new file mode 100644 index 0000000000..ba00c1a470 --- /dev/null +++ b/qcom/opensource/mm-drivers/hw_fence/niobe_defconfig @@ -0,0 +1,3 @@ +CONFIG_QTI_HW_FENCE=y +CONFIG_QTI_HW_FENCE_USE_SYNX=y +CONFIG_QTI_ENABLE_HW_FENCE_DEFAULT=y \ No newline at end of file diff --git a/qcom/opensource/mm-drivers/hw_fence/src/hw_fence_drv_debug.c b/qcom/opensource/mm-drivers/hw_fence/src/hw_fence_drv_debug.c new file mode 100644 index 0000000000..ef9cbb7518 --- /dev/null +++ b/qcom/opensource/mm-drivers/hw_fence/src/hw_fence_drv_debug.c @@ -0,0 +1,1516 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include + +#include "hw_fence_drv_priv.h" +#include "hw_fence_drv_debug.h" +#include "hw_fence_drv_ipc.h" +#include "hw_fence_drv_utils.h" +#include "hw_fence_drv_fence.h" + +#define HW_FENCE_DEBUG_MAX_LOOPS 200 + +#define HFENCE_TBL_MSG \ + "[%d]hfence[%u] v:%d err:%u ctx:%llu seq:%llu wait:0x%llx alloc:%d f:0x%llx child_cnt:%d"\ + "%s ct:%llu tt:%llu wt:%llu ref:0x%x h_synx:%u\n" + +/* each hwfence parent includes one "32-bit" element + "," separator */ +#define HW_FENCE_MAX_PARENTS_SUBLIST_DUMP (MSM_HW_FENCE_MAX_JOIN_PARENTS * 9) +#define HW_FENCE_MAX_PARENTS_DUMP (sizeof("parent_list[] ") + HW_FENCE_MAX_PARENTS_SUBLIST_DUMP) + +/* event dump data includes one "32-bit" element + "|" separator */ +#define HW_FENCE_MAX_DATA_PER_EVENT_DUMP (HW_FENCE_EVENT_MAX_DATA * 9) + +#define HFENCE_EVT_MSG "[%d][cpu:%d][%llu] data[%d]:%s\n" + +#define ktime_compare_safe(A, B) ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0)) + +#define HFENCE_QHDR_MSG \ + "Client:%d %s q_sz_bytes:%u rd_idx:%u wr_idx:%u tx_wm:%u skips:%s start:%u factor:%u\n" +#define HFENCE_QPAYLOAD_MSG \ + "%s[%d]: hash:%llu ctx:%llu seqno:%llu f:%llu d:%llu err:%u time:%llu type:%u\n" + +u32 msm_hw_fence_debug_level = HW_FENCE_PRINTK; + +/** + * struct client_data - Structure holding the data of the debug clients. + * + * @client_id: client id. + * @dma_context: context id to create the dma-fences for the client. + * @seqno_cnt: sequence number, this is a counter to simulate the seqno for debugging. + * @client_handle: handle for the client, this is returned by the hw-fence driver after + * a successful registration of the client. 
+ * @mem_descriptor: memory descriptor for the client-queues. This is populated by the hw-fence + * driver after a successful registration of the client. + * @list: client node. + */ +struct client_data { + int client_id; + u64 dma_context; + u64 seqno_cnt; + void *client_handle; + struct msm_hw_fence_mem_addr mem_descriptor; + struct list_head list; +}; + +static void _dump_fence_helper(enum hw_fence_drv_prio prio, struct msm_hw_fence *hw_fence, + char *parents_dump, u32 index, u32 count) +{ + char sublist[HW_FENCE_MAX_PARENTS_SUBLIST_DUMP]; + u32 parents_cnt; + int i, len = 0; + + if (!hw_fence || !parents_dump) { + HWFNC_ERR("invalid params hw_fence:0x%pK parents_dump:0x%pK\n", hw_fence, + parents_dump); + return; + } + + memset(parents_dump, 0, sizeof(char) * HW_FENCE_MAX_PARENTS_DUMP); + if (hw_fence->parents_cnt) { + if (hw_fence->parents_cnt > MSM_HW_FENCE_MAX_JOIN_PARENTS) { + HWFNC_ERR("hfence[%u] has invalid parents_cnt:%d greater than max:%d\n", + index, hw_fence->parents_cnt, MSM_HW_FENCE_MAX_JOIN_PARENTS); + parents_cnt = MSM_HW_FENCE_MAX_JOIN_PARENTS; + } else { + parents_cnt = hw_fence->parents_cnt; + } + + memset(sublist, 0, sizeof(sublist)); + for (i = 0; i < parents_cnt; i++) + len += scnprintf(sublist + len, HW_FENCE_MAX_PARENTS_SUBLIST_DUMP - len, + "%llu,", hw_fence->parent_list[i]); + scnprintf(parents_dump, HW_FENCE_MAX_PARENTS_DUMP, " p:[%s]", sublist); + } + + HWFNC_DBG_DUMP(prio, HFENCE_TBL_MSG, + count, index, hw_fence->valid, hw_fence->error, hw_fence->ctx_id, hw_fence->seq_id, + hw_fence->wait_client_mask, hw_fence->fence_allocator, hw_fence->flags, + hw_fence->pending_child_cnt, parents_dump, hw_fence->fence_create_time, + hw_fence->fence_trigger_time, hw_fence->fence_wait_time, hw_fence->refcount, + hw_fence->h_synx); +} + +void hw_fence_debug_dump_fence(enum hw_fence_drv_prio prio, struct msm_hw_fence *hw_fence, u64 hash, + u32 count) +{ + char parents_dump[HW_FENCE_MAX_PARENTS_DUMP]; + + return _dump_fence_helper(prio, hw_fence, parents_dump, hash, count); +} + +#if IS_ENABLED(CONFIG_DEBUG_FS) +static int _get_debugfs_input_client_with_min(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos, + struct hw_fence_driver_data **drv_data, int client_id_min) +{ + char buf[10]; + int client_id; + + if (!file || !file->private_data) { + HWFNC_ERR("unexpected data file:0x%pK private_data:0x%pK\n", file, + file ? 
file->private_data : NULL); + return -EINVAL; + } + *drv_data = file->private_data; + + if (count >= sizeof(buf)) + return -EFAULT; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + buf[count] = 0; /* end of string */ + + if (kstrtouint(buf, 0, &client_id)) + return -EFAULT; + + if (client_id < client_id_min || client_id >= (*drv_data)->clients_num) { + HWFNC_ERR("invalid client_id:%d min:%d max:%d\n", client_id, + client_id_min, (*drv_data)->clients_num); + return -EINVAL; + } + + return client_id; +} + +static int _get_debugfs_input_client(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos, + struct hw_fence_driver_data **drv_data) +{ + return _get_debugfs_input_client_with_min(file, user_buf, count, ppos, drv_data, + HW_FENCE_CLIENT_ID_CTX0); +} + +static int _debugfs_ipcc_trigger(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos, u32 tx_client, u32 rx_client) +{ + struct hw_fence_driver_data *drv_data; + int client_id, signal_id; + + client_id = _get_debugfs_input_client(file, user_buf, count, ppos, &drv_data); + if (client_id < 0) + return -EINVAL; + + /* Get signal-id that hw-fence driver would trigger for this client */ + signal_id = hw_fence_ipcc_get_signal_id(drv_data, client_id); + if (signal_id < 0) + return -EINVAL; + + HWFNC_DBG_IRQ("client_id:%d ipcc write tx_client:%d rx_client:%d signal_id:%d qtime:%llu\n", + client_id, tx_client, rx_client, signal_id, hw_fence_get_qtime(drv_data)); + hw_fence_ipcc_trigger_signal(drv_data, tx_client, rx_client, signal_id); + + return count; +} + +/** + * hw_fence_dbg_ipcc_write() - debugfs write to trigger an ipcc irq. + * @file: file handler. + * @user_buf: user buffer content from debugfs. + * @count: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs receives as parameter a hw-fence driver client_id, and triggers an ipcc signal + * from apps to apps for that client id. + */ +static ssize_t hw_fence_dbg_ipcc_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct hw_fence_driver_data *drv_data = file->private_data; + + return _debugfs_ipcc_trigger(file, user_buf, count, ppos, drv_data->ipcc_client_pid, + drv_data->ipcc_fctl_vid); +} + +/** + * hw_fence_dbg_ipcc_dpu_write() - debugfs write to trigger an ipcc irq to dpu core. + * @file: file handler. + * @user_buf: user buffer content from debugfs. + * @count: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs receives as parameter a hw-fence driver client_id, and triggers an ipcc signal + * from apps to dpu for that client id. 
+ */ +static ssize_t hw_fence_dbg_ipcc_dpu_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct hw_fence_driver_data *drv_data = file->private_data; + + return _debugfs_ipcc_trigger(file, user_buf, count, ppos, drv_data->ipcc_client_pid, + hw_fence_ipcc_get_client_virt_id(drv_data, HW_FENCE_CLIENT_ID_CTL0)); + +} + +static const struct file_operations hw_fence_dbg_ipcc_dpu_fops = { + .open = simple_open, + .write = hw_fence_dbg_ipcc_dpu_write, +}; + +static const struct file_operations hw_fence_dbg_ipcc_fops = { + .open = simple_open, + .write = hw_fence_dbg_ipcc_write, +}; + +struct client_data *_get_client_node(struct hw_fence_driver_data *drv_data, u32 client_id) +{ + struct client_data *node = NULL; + bool found = false; + + mutex_lock(&drv_data->debugfs_data.clients_list_lock); + list_for_each_entry(node, &drv_data->debugfs_data.clients_list, list) { + if (node->client_id == client_id) { + found = true; + break; + } + } + mutex_unlock(&drv_data->debugfs_data.clients_list_lock); + + return found ? node : NULL; +} + +/** + * hw_fence_dbg_reset_client_wr() - debugfs write to trigger reset in a debug hw-fence client. + * @file: file handler. + * @user_buf: user buffer content from debugfs. + * @count: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs receives as parameter a hw-fence driver client_id, and triggers a reset for + * this client. Note that this operation will only perform on hw-fence clients created through + * the debug framework. + */ +static ssize_t hw_fence_dbg_reset_client_wr(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + int client_id, ret; + struct client_data *client_info; + struct hw_fence_driver_data *drv_data; + + client_id = _get_debugfs_input_client(file, user_buf, count, ppos, &drv_data); + if (client_id < 0) + return -EINVAL; + + client_info = _get_client_node(drv_data, client_id); + if (!client_info || IS_ERR_OR_NULL(client_info->client_handle)) { + HWFNC_ERR("client:%d not registered as debug client\n", client_id); + return -EINVAL; + } + + HWFNC_DBG_H("resetting client: %d\n", client_id); + ret = msm_hw_fence_reset_client(client_info->client_handle, 0); + if (ret) + HWFNC_ERR("failed to reset client:%d\n", client_id); + + return count; +} + +/** + * hw_fence_dbg_register_clients_wr() - debugfs write to register a client with the hw-fence + * driver for debugging. + * @file: file handler. + * @user_buf: user buffer content from debugfs. + * @count: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs receives as parameter a hw-fence driver client_id to register for debug. + * Note that if the client_id received was already registered by any other driver, the + * registration here will fail. 
+ */ +static ssize_t hw_fence_dbg_register_clients_wr(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + int client_id; + struct client_data *client_info; + struct hw_fence_driver_data *drv_data; + + client_id = _get_debugfs_input_client(file, user_buf, count, ppos, &drv_data); + if (client_id < 0) + return -EINVAL; + + /* we cannot create same debug client twice */ + if (_get_client_node(drv_data, client_id)) { + HWFNC_ERR("client:%d already registered as debug client\n", client_id); + return -EINVAL; + } + + client_info = kzalloc(sizeof(*client_info), GFP_KERNEL); + if (!client_info) + return -ENOMEM; + + HWFNC_DBG_H("register client %d\n", client_id); + client_info->client_handle = msm_hw_fence_register(client_id, + &client_info->mem_descriptor); + if (IS_ERR_OR_NULL(client_info->client_handle)) { + HWFNC_ERR("error registering as debug client:%d\n", client_id); + client_info->client_handle = NULL; + return -EFAULT; + } + + client_info->dma_context = dma_fence_context_alloc(1); + client_info->client_id = client_id; + + mutex_lock(&drv_data->debugfs_data.clients_list_lock); + list_add(&client_info->list, &drv_data->debugfs_data.clients_list); + mutex_unlock(&drv_data->debugfs_data.clients_list_lock); + + return count; +} + +/** + * hw_fence_dbg_tx_and_signal_clients_wr() - debugfs write to simulate the lifecycle of a hw-fence. + * @file: file handler. + * @user_buf: user buffer content from debugfs. + * @count: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs receives as parameter the number of iterations that the simulation will run, + * each iteration will: create, signal, register-for-signal and destroy a hw-fence. + * Note that this simulation relies in the user first registering the clients as debug-clients + * through the debugfs 'hw_fence_dbg_register_clients_wr'. If the clients are not previously + * registered as debug-clients, this simulation will fail and won't run. + */ +static ssize_t hw_fence_dbg_tx_and_signal_clients_wr(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + u32 input_data, client_id_src, client_id_dst, tx_client, rx_client; + struct client_data *client_info_src, *client_info_dst; + struct hw_fence_driver_data *drv_data; + struct msm_hw_fence_client *hw_fence_client, *hw_fence_client_dst; + u64 context, seqno, hash; + char buf[10]; + int signal_id, ret; + + if (!file || !file->private_data) { + HWFNC_ERR("unexpected data file:0x%pK private_data:0x%pK\n", file, + file ? 
file->private_data : NULL); + return -EINVAL; + } + drv_data = file->private_data; + + if (count >= sizeof(buf)) + return -EFAULT; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + buf[count] = 0; /* end of string */ + + if (kstrtouint(buf, 0, &input_data)) + return -EFAULT; + + if (input_data <= 0) { + HWFNC_ERR("won't do anything, write value greather than 0 to start..\n"); + return 0; + } else if (input_data > HW_FENCE_DEBUG_MAX_LOOPS) { + HWFNC_ERR("requested loops:%d exceed max:%d, setting max\n", input_data, + HW_FENCE_DEBUG_MAX_LOOPS); + input_data = HW_FENCE_DEBUG_MAX_LOOPS; + } + + client_id_src = HW_FENCE_CLIENT_ID_CTL0; + client_id_dst = HW_FENCE_CLIENT_ID_CTL1; + + client_info_src = _get_client_node(drv_data, client_id_src); + client_info_dst = _get_client_node(drv_data, client_id_dst); + + if (!client_info_src || IS_ERR_OR_NULL(client_info_src->client_handle) || + !client_info_dst || IS_ERR_OR_NULL(client_info_dst->client_handle)) { + /* Make sure we registered this client through debugfs */ + HWFNC_ERR("client_id_src:%d or client_id_dst:%d not registered as debug client!\n", + client_id_src, client_id_dst); + return -EINVAL; + } + + hw_fence_client = (struct msm_hw_fence_client *)client_info_src->client_handle; + hw_fence_client_dst = (struct msm_hw_fence_client *)client_info_dst->client_handle; + + while (drv_data->debugfs_data.create_hw_fences && input_data > 0) { + + /***********************************************************/ + /***** SRC CLIENT - CREATE HW FENCE & TX QUEUE UPDATE ******/ + /***********************************************************/ + + /* we will use the context and the seqno of the source client */ + context = client_info_src->dma_context; + seqno = client_info_src->seqno_cnt; + + /* linear increment of the seqno for the src client*/ + client_info_src->seqno_cnt++; + + /* Create hw fence for src client */ + ret = hw_fence_create(drv_data, hw_fence_client, context, context, seqno, &hash); + if (ret) { + HWFNC_ERR("Error creating HW fence\n"); + goto exit; + } + + /* Write to Tx queue */ + hw_fence_update_queue(drv_data, hw_fence_client, context, seqno, hash, + 0, 0, 0, HW_FENCE_TX_QUEUE - 1); /* no flags and no error */ + + /**********************************************/ + /***** DST CLIENT - REGISTER WAIT CLIENT ******/ + /**********************************************/ + /* use same context and seqno that src client used to create fence */ + ret = hw_fence_register_wait_client(drv_data, NULL, hw_fence_client_dst, context, + seqno, &hash, 0); + if (ret) { + HWFNC_ERR("failed to register for wait\n"); + return -EINVAL; + } + + /*********************************************/ + /***** SRC CLIENT - TRIGGER IPCC SIGNAL ******/ + /*********************************************/ + + /* AFTER THIS IS WHEN SVM WILL GET CALLED AND WILL PROCESS SRC AND DST CLIENTS */ + + /* Trigger IPCC for SVM to read the queue */ + + /* Get signal-id that hw-fence driver would trigger for this client */ + signal_id = dbg_out_clients_signal_map_no_dpu[client_id_src].ipc_signal_id; + if (signal_id < 0) + return -EINVAL; + + /* Write to ipcc to trigger the irq */ + tx_client = drv_data->ipcc_client_pid; + rx_client = drv_data->ipcc_client_vid; + HWFNC_DBG_IRQ("client:%d tx_client:%d rx_client:%d signal:%d delay:%d in_data%d\n", + client_id_src, tx_client, rx_client, signal_id, + drv_data->debugfs_data.hw_fence_sim_release_delay, input_data); + + hw_fence_ipcc_trigger_signal(drv_data, tx_client, rx_client, signal_id); + + 
/********************************************/ + /******** WAIT ******************************/ + /********************************************/ + + /* wait between iterations */ + usleep_range(drv_data->debugfs_data.hw_fence_sim_release_delay, + (drv_data->debugfs_data.hw_fence_sim_release_delay + 5)); + + /******************************************/ + /***** SRC CLIENT - CLEANUP HW FENCE ******/ + /******************************************/ + + /* cleanup hw fence for src client */ + ret = hw_fence_destroy_with_hash(drv_data, hw_fence_client, hash); + if (ret) { + HWFNC_ERR("Error destroying HW fence\n"); + goto exit; + } + + input_data--; + } /* LOOP.. */ + +exit: + return count; +} + +/** + * hw_fence_dbg_create_wr() - debugfs write to simulate the creation of a hw-fence. + * @file: file handler. + * @user_buf: user buffer content from debugfs. + * @count: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs receives as parameter the client-id, for which the hw-fence will be created. + * Note that this simulation relies in the user first registering the client as a debug-client + * through the debugfs 'hw_fence_dbg_register_clients_wr'. If the client is not previously + * registered as debug-client, this simulation will fail and won't run. + */ +static ssize_t hw_fence_dbg_create_wr(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct msm_hw_fence_create_params params; + struct hw_fence_driver_data *drv_data; + struct client_data *client_info; + struct hw_dma_fence *hw_dma_fence; + struct dma_fence *fence; + static u64 hw_fence_dbg_seqno = 1; + int client_id, ret; + u64 hash; + + client_id = _get_debugfs_input_client(file, user_buf, count, ppos, &drv_data); + if (client_id < 0) + return -EINVAL; + + client_info = _get_client_node(drv_data, client_id); + if (!client_info || IS_ERR_OR_NULL(client_info->client_handle)) { + HWFNC_ERR("client:%d not registered as debug client\n", client_id); + return -EINVAL; + } + + fence = hw_dma_fence_init(client_info->client_handle, client_info->dma_context, + hw_fence_dbg_seqno); + if (IS_ERR_OR_NULL(fence)) + return -EINVAL; + hw_dma_fence = (struct hw_dma_fence *)fence; + + params.fence = fence; + params.handle = &hash; + ret = msm_hw_fence_create(client_info->client_handle, ¶ms); + if (ret) { + HWFNC_ERR("failed to create hw_fence for client:%d ctx:%llu seqno:%llu\n", + client_id, client_info->dma_context, hw_fence_dbg_seqno); + dma_fence_put(fence); + return -EINVAL; + } + hw_fence_dbg_seqno++; + + /* keep handle in dma_fence, to destroy hw-fence during release */ + hw_dma_fence->client_handle = client_info->client_handle; + + return count; +} + +static inline int _dump_fence(struct msm_hw_fence *hw_fence, char *buf, int len, int max_size, + u32 index, u32 cnt) +{ + int ret; + char parents_dump[HW_FENCE_MAX_PARENTS_DUMP]; + + _dump_fence_helper(HW_FENCE_INFO, hw_fence, parents_dump, index, cnt); + + ret = scnprintf(buf + len, max_size - len, HFENCE_TBL_MSG, + cnt, index, hw_fence->valid, hw_fence->error, hw_fence->ctx_id, hw_fence->seq_id, + hw_fence->wait_client_mask, hw_fence->fence_allocator, hw_fence->flags, + hw_fence->pending_child_cnt, parents_dump, hw_fence->fence_create_time, + hw_fence->fence_trigger_time, hw_fence->fence_wait_time, hw_fence->refcount, + hw_fence->h_synx); + + return ret; +} + +void hw_fence_debug_dump_table(enum hw_fence_drv_prio prio, struct hw_fence_driver_data *drv_data) +{ + u32 i, cnt = 0; + struct msm_hw_fence *hw_fence; + + for (i = 0; i 
< drv_data->hw_fences_tbl_cnt; i++) { + hw_fence = &drv_data->hw_fences_tbl[i]; + if (!hw_fence->valid) + continue; + hw_fence_debug_dump_fence(prio, hw_fence, i, cnt); + cnt++; + } +} + +static int dump_single_entry(struct hw_fence_driver_data *drv_data, char *buf, u32 *index, + int max_size) +{ + struct msm_hw_fence *hw_fence; + u64 context, seqno, hash = 0; + int len = 0; + + context = drv_data->debugfs_data.context_rd; + seqno = drv_data->debugfs_data.seqno_rd; + + hw_fence = msm_hw_fence_find(drv_data, NULL, context, context, seqno, &hash); + if (!hw_fence) { + HWFNC_ERR("no valid hfence found for context:%llu seqno:%llu hash:%llu", + context, seqno, hash); + len = scnprintf(buf + len, max_size - len, + "no valid hfence found for context:%llu seqno:%llu hash:%llu\n", + context, seqno, hash); + + goto exit; + } + + len = _dump_fence(hw_fence, buf, len, max_size, hash, 0); + hw_fence_destroy_with_hash(drv_data, NULL, hash); /* release ref from msm_hw_fence_find */ + +exit: + /* move idx to end of table to stop the dump */ + *index = drv_data->hw_fences_tbl_cnt; + + return len; +} + +static int dump_full_table(struct hw_fence_driver_data *drv_data, char *buf, u32 *index, + u32 *cnt, int max_size, int entry_size) +{ + struct msm_hw_fence *hw_fence; + int len = 0; + + while (((*index)++ < drv_data->hw_fences_tbl_cnt) && (len < (max_size - entry_size))) { + hw_fence = &drv_data->hw_fences_tbl[*index]; + + if (!hw_fence->valid) + continue; + + len += _dump_fence(hw_fence, buf, len, max_size, *index, *cnt); + (*cnt)++; + } + + return len; +} + +static void _find_earliest_event(struct hw_fence_driver_data *drv_data, u32 *start_index, + u64 *start_time) +{ + u32 i; + + if (!start_index || !start_time) { + HWFNC_ERR("invalid params start_index:0x%pK start_time:0x%pK\n", start_index, + start_time); + return; + } + + mb(); /* make sure data is ready before read */ + for (i = 0; i < drv_data->total_events; i++) { + u64 time = drv_data->events[i].time; + + if (time && (!*start_time || time < *start_time)) { + *start_time = time; + *start_index = i; + } + } +} + +static void _dump_event(enum hw_fence_drv_prio prio, struct msm_hw_fence_event *event, + char *data, u32 index) +{ + u32 data_cnt; + int i, len = 0; + + if (!event || !data) { + HWFNC_ERR("invalid params event:0x%pK data:0x%pK\n", event, data); + return; + } + + memset(data, 0, sizeof(char) * HW_FENCE_MAX_DATA_PER_EVENT_DUMP); + if (event->data_cnt > HW_FENCE_EVENT_MAX_DATA) { + HWFNC_ERR("event[%d] has invalid data_cnt:%u greater than max_data_cnt:%u\n", + index, event->data_cnt, HW_FENCE_EVENT_MAX_DATA); + data_cnt = HW_FENCE_EVENT_MAX_DATA; + } else { + data_cnt = event->data_cnt; + } + + for (i = 0; i < data_cnt; i++) + len += scnprintf(data + len, HW_FENCE_MAX_DATA_PER_EVENT_DUMP - len, + "%x|", event->data[i]); + + HWFNC_DBG_DUMP(prio, HFENCE_EVT_MSG, index, event->cpu, event->time, event->data_cnt, data); +} + +void hw_fence_debug_dump_events(enum hw_fence_drv_prio prio, struct hw_fence_driver_data *drv_data) +{ + char data[HW_FENCE_MAX_DATA_PER_EVENT_DUMP]; + u32 start_index; + u64 start_time; + int i; + + if (!drv_data->events) { + HWFNC_ERR("events not supported\n"); + return; + } + + _find_earliest_event(drv_data, &start_index, &start_time); + for (i = start_index; i < drv_data->total_events && drv_data->events[i].time; i++) + _dump_event(prio, &drv_data->events[i], data, i); + for (i = 0; i < start_index; i++) + _dump_event(prio, &drv_data->events[i], data, i); +} + +/** + * hw_fence_dbg_dump_events_rd() - debugfs read to dump 
the fctl events. + * @file: file handler. + * @user_buf: user buffer content for debugfs. + * @user_buf_size: size of the user buffer. + * @ppos: position offset of the user buffer. + */ +static ssize_t hw_fence_dbg_dump_events_rd(struct file *file, char __user *user_buf, + size_t user_buf_size, loff_t *ppos) +{ + struct hw_fence_driver_data *drv_data; + u32 entry_size = sizeof(HFENCE_EVT_MSG), max_size = SZ_4K; + char *buf = NULL; + int len = 0; + static u64 start_time; + static int index, start_index; + static bool wraparound; + + if (!file || !file->private_data) { + HWFNC_ERR("unexpected data file:0x%pK private_data:0x%pK\n", file, + file ? file->private_data : NULL); + return -EINVAL; + } + drv_data = file->private_data; + + if (!drv_data->events) { + HWFNC_ERR("events not supported\n"); + return -EINVAL; + } + + if (wraparound && index >= start_index) { + HWFNC_DBG_H("no more data index:%d total_events:%d\n", index, + drv_data->total_events); + start_time = 0; + index = 0; + wraparound = false; + return 0; + } + + if (user_buf_size < entry_size) { + HWFNC_ERR("Not enough buff size:%zu to dump entries:%d\n", user_buf_size, + entry_size); + return -EINVAL; + } + + buf = kzalloc(max_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* find index of earliest event */ + if (!start_time) { + _find_earliest_event(drv_data, &start_index, &start_time); + index = start_index; + HWFNC_DBG_H("events:0x%pK start_index:%d start_time:%llu total_events:%d\n", + drv_data->events, start_index, start_time, drv_data->total_events); + } + + HWFNC_DBG_H("++ dump_events index:%d qtime:%llu\n", index, hw_fence_get_qtime(drv_data)); + while ((!wraparound || index < start_index) && len < (max_size - entry_size)) { + char data[HW_FENCE_MAX_DATA_PER_EVENT_DUMP]; + + if (drv_data->events[index].time) { + _dump_event(HW_FENCE_INFO, &drv_data->events[index], data, index); + len += scnprintf(buf + len, max_size - len, HFENCE_EVT_MSG, index, + drv_data->events[index].cpu, drv_data->events[index].time, + drv_data->events[index].data_cnt, data); + } + + index++; + if (index >= drv_data->total_events) { + index = 0; + wraparound = true; + } + } + HWFNC_DBG_H("-- dump_events: index:%d qtime:%llu\n", index, hw_fence_get_qtime(drv_data)); + + if (len < 0 || len > user_buf_size) { + HWFNC_ERR("len:%d invalid buff size:%zu\n", len, user_buf_size); + len = 0; + } + + if (len == 0) { + HWFNC_DBG_H("not printing anything to output because len:0 buf_size:%zu\n", + user_buf_size); + goto exit; + } + + if (copy_to_user(user_buf, buf, len)) { + HWFNC_ERR("failed to copy to user!\n"); + len = -EFAULT; + goto exit; + } + *ppos += len; +exit: + kfree(buf); + return len; +} + +static int _dump_queue_header(struct hw_fence_driver_data *drv_data, enum hw_fence_drv_prio prio, + struct msm_hw_fence_queue *queue, int client_id, int queue_type, u32 **rd_idx_ptr, + u32 **wr_idx_ptr, u32 **tx_wm_ptr) +{ + if (!drv_data || !queue || !rd_idx_ptr || !wr_idx_ptr || !tx_wm_ptr) { + HWFNC_ERR("invalid drv_data:0x%pK q:0x%pK rd_idx:0x%pK wr_idx:0x%pK tx_wm:0x%pK\n", + drv_data, queue, rd_idx_ptr, wr_idx_ptr, tx_wm_ptr); + return -EINVAL; + } + + hw_fence_get_queue_idx_ptrs(drv_data, queue->va_header, rd_idx_ptr, wr_idx_ptr, + tx_wm_ptr); + + HWFNC_DBG_DUMP(prio, HFENCE_QHDR_MSG, client_id, _get_queue_type(queue_type), + queue->q_size_bytes, **rd_idx_ptr, **wr_idx_ptr, **tx_wm_ptr, + queue->skip_wr_idx ? 
"true" : "false", queue->rd_wr_idx_start, + queue->rd_wr_idx_factor); + + return 0; +} + +static struct msm_hw_fence_queue_payload *_dump_queue_payload(enum hw_fence_drv_prio prio, + struct msm_hw_fence_queue *queue, int index, int queue_type) +{ + struct msm_hw_fence_queue_payload *payload; + u32 *read_ptr; + u64 timestamp; + + read_ptr = ((u32 *)queue->va_queue + + (index * (sizeof(struct msm_hw_fence_queue_payload) / sizeof(u32)))); + payload = (struct msm_hw_fence_queue_payload *)read_ptr; + timestamp = (u64)payload->timestamp_lo | ((u64)payload->timestamp_hi << 32); + HWFNC_DBG_DUMP(prio, HFENCE_QPAYLOAD_MSG, _get_queue_type(queue_type), + index, payload->hash, payload->ctxt_id, payload->seqno, payload->flags, + payload->client_data, payload->error, timestamp, payload->type); + + return payload; +} + +static void _dump_queue(struct hw_fence_driver_data *drv_data, enum hw_fence_drv_prio prio, + struct msm_hw_fence_client *hw_fence_client, int queue_type) +{ + struct msm_hw_fence_queue *queue; + u32 queue_entries, *rd_idx_ptr, *wr_idx_ptr, *tx_wm_ptr; + int i; + + queue = &hw_fence_client->queues[queue_type]; + + if ((queue_type > hw_fence_client->queues_num) || !queue || !queue->va_header + || !queue->va_queue) { + HWFNC_ERR("Cannot dump client:%d q_type:%s q_ptr:0x%pK q_header:0x%pK q_va:0x%pK\n", + hw_fence_client->client_id, _get_queue_type(queue_type), queue, + queue ? queue->va_header : NULL, queue ? queue->va_queue : NULL); + return; + } + + mb(); /* make sure data is ready before read */ + _dump_queue_header(drv_data, prio, queue, hw_fence_client->client_id, queue_type, + &rd_idx_ptr, &wr_idx_ptr, &tx_wm_ptr); + queue_entries = queue->q_size_bytes / HW_FENCE_CLIENT_QUEUE_PAYLOAD; + + for (i = 0; i < queue_entries; i++) { + _dump_queue_payload(prio, queue, i, queue_type); + } +} + +void hw_fence_debug_dump_queues(struct hw_fence_driver_data *drv_data, enum hw_fence_drv_prio prio, + struct msm_hw_fence_client *hw_fence_client) +{ + if (!hw_fence_client) { + HWFNC_ERR("Invalid params client:0x%pK\n", hw_fence_client); + return; + } + + if (hw_fence_client->queues_num == HW_FENCE_CLIENT_QUEUES) + _dump_queue(drv_data, prio, hw_fence_client, HW_FENCE_RX_QUEUE - 1); + _dump_queue(drv_data, prio, hw_fence_client, HW_FENCE_TX_QUEUE - 1); +} + +/** + * hw_fence_dbg_dump_queues_wr() - debugfs wr to control the dump of hw-fences queues. + * @file: file handler. + * @user_buf: user buffer content for debugfs. + * @count: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs receives as parameter either zero to dump the ctrl queues or the client_id for + * which to dump client queues in the next read of the same debugfs node. + */ +static ssize_t hw_fence_dbg_dump_queues_wr(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct hw_fence_driver_data *drv_data; + int client_id; + + client_id = _get_debugfs_input_client_with_min(file, user_buf, count, ppos, &drv_data, 0); + if (client_id < 0) + return -EINVAL; + + drv_data->debugfs_data.client_id_rd = client_id; + + return count; +} + +/** + * hw_fence_dbg_dump_queues_rd() - debugfs read to dump ctrl or client queues. + * @file: file handler. + * @user_buf: user buffer content for debugfs. + * @user_buf_size: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs dumps either hw-fence ctrl queues or the client queues of a given client. 
The user + * can provide zero (to print the ctrl queues) or the client_id of interest by writing to this + * debugfs node (see documentation for the write in 'hw_fence_dbg_dump_queues_wr'). By default, + * dumps the ctrl queues. + */ +static ssize_t hw_fence_dbg_dump_queues_rd(struct file *file, char __user *user_buf, + size_t user_buf_size, loff_t *ppos) +{ + struct hw_fence_driver_data *drv_data; + struct msm_hw_fence_client *hw_fence_client; + struct msm_hw_fence_queue *queue; + u32 entry_size = sizeof(HFENCE_QPAYLOAD_MSG), max_size = SZ_4K; + u32 client_id, queue_entries, queues_num, *rd_idx_ptr, *wr_idx_ptr, *tx_wm_ptr; + char *buf = NULL; + int len = 0; + static u32 index, queue_type; + static bool qhdr_dumped; + + if (!file || !file->private_data) { + HWFNC_ERR("unexpected data file:0x%pK private_data:0x%pK\n", file, + file ? file->private_data : NULL); + return -EINVAL; + } + drv_data = file->private_data; + + client_id = drv_data->debugfs_data.client_id_rd; + if (client_id == 0) { + queue = &drv_data->ctrl_queues[queue_type]; + queues_num = HW_FENCE_CTRL_QUEUES; + } else { + if (!drv_data->clients[client_id]) { + HWFNC_ERR("client %d not initialized\n", client_id); + return -EINVAL; + } + hw_fence_client = drv_data->clients[client_id]; + queue = &hw_fence_client->queues[queue_type]; + queues_num = hw_fence_client->queues_num; + } + queue_entries = queue->q_size_bytes / HW_FENCE_CLIENT_QUEUE_PAYLOAD; + + if (queue_type >= queues_num) { + HWFNC_DBG_H("no more data client_id:%d q_num:%u q_entries:%u\n", client_id, + queues_num, queue_entries); + queue_type = 0; + index = 0; + return 0; + } + + if (!queue || !queue->va_header || !queue->va_queue) { + HWFNC_ERR("client:%d %s q_ptr:0x%pK qhdr_va:0x%pK q_va:0x%pK uninitialized\n", + client_id, _get_queue_type(queue_type), queue, + queue ? queue->va_header : NULL, queue ? queue->va_queue : NULL); + return -EINVAL; + } + + if (user_buf_size < entry_size) { + HWFNC_ERR("Not enough buff size:%zu to dump entries:%d\n", user_buf_size, + entry_size); + return -EINVAL; + } + + buf = kvzalloc(max_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + if (!qhdr_dumped) { + mb(); /* make sure data is ready before read */ + _dump_queue_header(drv_data, HW_FENCE_INFO, queue, client_id, queue_type, + &rd_idx_ptr, &wr_idx_ptr, &tx_wm_ptr); + len += scnprintf(buf + len, max_size - len, HFENCE_QHDR_MSG, client_id, + _get_queue_type(queue_type), queue->q_size_bytes, *rd_idx_ptr, *wr_idx_ptr, + *tx_wm_ptr, queue->skip_wr_idx ? 
"true" : "false", queue->rd_wr_idx_start, + queue->rd_wr_idx_factor); + qhdr_dumped = true; + } + + for (; index < queue_entries && len < (max_size - entry_size); index++) { + struct msm_hw_fence_queue_payload *payload; + u64 timestamp; + + payload = _dump_queue_payload(HW_FENCE_INFO, queue, index, queue_type); + + if (!(payload->hash || payload->ctxt_id || payload->seqno || payload->flags || + payload->client_data || payload->error || payload->timestamp_lo || + payload->timestamp_hi || payload->type)) + continue; + + timestamp = (u64)payload->timestamp_lo | ((u64)payload->timestamp_hi << 32); + len += scnprintf(buf + len, max_size - len, HFENCE_QPAYLOAD_MSG, + _get_queue_type(queue_type), index, payload->hash, payload->ctxt_id, + payload->seqno, payload->flags, payload->client_data, payload->error, + timestamp, payload->type); + } + if (index >= queue_entries) { + index = 0; + queue_type++; + qhdr_dumped = false; + } + + if (len <= 0 || len > user_buf_size) { + HWFNC_ERR("len:%d invalid buff size:%zu\n", len, user_buf_size); + len = 0; + goto exit; + } + + if (copy_to_user(user_buf, buf, len)) { + HWFNC_ERR("failed to copy to user!\n"); + len = -EFAULT; + goto exit; + } + *ppos += len; +exit: + kvfree(buf); + return len; +} + +/** + * hw_fence_dbg_dump_table_rd() - debugfs read to dump the hw-fences table. + * @file: file handler. + * @user_buf: user buffer content for debugfs. + * @user_buf_size: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs dumps the hw-fence table. By default debugfs will dump all the valid entries of the + * whole table. However, if user only wants to dump only one particular entry, user can provide the + * context-id and seqno of the dma-fence of interest by writing to this debugfs node (see + * documentation for the write in 'hw_fence_dbg_dump_table_wr'). + */ +static ssize_t hw_fence_dbg_dump_table_rd(struct file *file, char __user *user_buf, + size_t user_buf_size, loff_t *ppos) +{ + struct hw_fence_driver_data *drv_data; + int entry_size = sizeof(HFENCE_TBL_MSG); + char *buf = NULL; + int len = 0, max_size = SZ_4K; + static u32 index, cnt; + + if (!file || !file->private_data) { + HWFNC_ERR("unexpected data file:0x%pK private_data:0x%pK\n", file, + file ? file->private_data : NULL); + return -EINVAL; + } + drv_data = file->private_data; + + if (!drv_data->hw_fences_tbl) { + HWFNC_ERR("Failed to dump table: Null fence table\n"); + return -EINVAL; + } + + if (index >= drv_data->hw_fences_tbl_cnt) { + HWFNC_DBG_H("no more data index:%d cnt:%d\n", index, drv_data->hw_fences_tbl_cnt); + index = cnt = 0; + return 0; + } + + if (user_buf_size < entry_size) { + HWFNC_ERR("Not enough buff size:%lu to dump entries:%d\n", user_buf_size, + entry_size); + return -EINVAL; + } + + buf = kzalloc(max_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + len = drv_data->debugfs_data.entry_rd ? 
+ dump_single_entry(drv_data, buf, &index, max_size) : + dump_full_table(drv_data, buf, &index, &cnt, max_size, entry_size); + + if (len < 0 || len > user_buf_size) { + HWFNC_ERR("len:%d invalid buff size:%zu\n", len, user_buf_size); + len = 0; + } + + if (len == 0) { + HWFNC_DBG_H("not printing anything to output because len:0 buf_size:%zu\n", + user_buf_size); + goto exit; + } + + if (copy_to_user(user_buf, buf, len)) { + HWFNC_ERR("failed to copy to user!\n"); + len = -EFAULT; + goto exit; + } + *ppos += len; +exit: + kfree(buf); + return len; +} + +/** + * hw_fence_dbg_dump_table_wr() - debugfs write to control the dump of the hw-fences table. + * @file: file handler. + * @user_buf: user buffer content from debugfs. + * @user_buf_size: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs receives as parameters the settings to dump either the whole hw-fences table + * or only one element on the table in the next read of the same debugfs node. + * If this debugfs receives two input values, it will interpret them as the 'context-id' and the + * 'sequence-id' to dump from the hw-fence table in the subsequent reads of the debugfs. + * Otherwise, if the debugfs receives only one input value, the next read from the debugfs, will + * dump the whole hw-fences table. + */ +static ssize_t hw_fence_dbg_dump_table_wr(struct file *file, + const char __user *user_buf, size_t user_buf_size, loff_t *ppos) +{ + struct hw_fence_driver_data *drv_data; + u64 param_0, param_1; + char buf[24]; + int num_input_params; + + if (!file || !file->private_data) { + HWFNC_ERR("unexpected data file:0x%pK private_data:0x%pK\n", file, + file ? file->private_data : NULL); + return -EINVAL; + } + drv_data = file->private_data; + + if (user_buf_size >= sizeof(buf)) { + HWFNC_ERR("wrong size:%lu size:%lu\n", user_buf_size, sizeof(buf)); + return -EFAULT; + } + + if (copy_from_user(buf, user_buf, user_buf_size)) + return -EFAULT; + + buf[user_buf_size] = 0; /* end of string */ + + /* read the input params */ + num_input_params = sscanf(buf, "%llu %llu", ¶m_0, ¶m_1); + + if (num_input_params == 2) { /* if debugfs receives two input params */ + drv_data->debugfs_data.context_rd = param_0; + drv_data->debugfs_data.seqno_rd = param_1; + drv_data->debugfs_data.entry_rd = true; + } else if (num_input_params == 1) { /* if debugfs receives one param */ + drv_data->debugfs_data.context_rd = 0; + drv_data->debugfs_data.seqno_rd = 0; + drv_data->debugfs_data.entry_rd = false; + } else { + HWFNC_ERR("invalid num params:%d\n", num_input_params); + return -EFAULT; + } + + return user_buf_size; +} + + +static inline void _cleanup_fences(int i, struct dma_fence **fences, spinlock_t **fences_lock) +{ + struct hw_dma_fence *dma_fence; + int fence_idx; + + for (fence_idx = i; fence_idx >= 0 ; fence_idx--) { + kfree(fences_lock[fence_idx]); + + dma_fence = to_hw_dma_fence(fences[fence_idx]); + kfree(dma_fence); + } + + kfree(fences_lock); + kfree(fences); +} + +/** + * hw_fence_dbg_create_join_fence() - debugfs write to simulate the lifecycle of a join hw-fence. + * @file: file handler. + * @user_buf: user buffer content from debugfs. + * @count: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs will: create, signal, register-for-signal and destroy a join hw-fence. + * Note that this simulation relies in the user first registering the clients as debug-clients + * through the debugfs 'hw_fence_dbg_register_clients_wr'. 
If the clients are not previously + * registered as debug-clients, this simulation will fail and won't run. + */ +static ssize_t hw_fence_dbg_create_join_fence(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct dma_fence_array *fence_array; + struct hw_fence_driver_data *drv_data; + struct dma_fence *fence_array_fence; + struct client_data *client_info_src, *client_info_dst; + u64 hw_fence_dbg_seqno = 1; + int client_id_src, client_id_dst; + struct msm_hw_fence_create_params params; + int i, ret = 0; + u64 hash; + struct msm_hw_fence_client *hw_fence_client; + int tx_client, rx_client, signal_id; + + /* creates 3 fences and a parent fence */ + int num_fences = 3; + struct dma_fence **fences = NULL; + spinlock_t **fences_lock = NULL; + + if (!file || !file->private_data) { + HWFNC_ERR("unexpected data file:0x%pK private_data:0x%pK\n", file, + file ? file->private_data : NULL); + return -EINVAL; + } + drv_data = file->private_data; + client_id_src = HW_FENCE_CLIENT_ID_CTL0; + client_id_dst = HW_FENCE_CLIENT_ID_CTL1; + client_info_src = _get_client_node(drv_data, client_id_src); + client_info_dst = _get_client_node(drv_data, client_id_dst); + if (!client_info_src || IS_ERR_OR_NULL(client_info_src->client_handle) || + !client_info_dst || IS_ERR_OR_NULL(client_info_dst->client_handle)) { + HWFNC_ERR("client_src:%d or client:%d is not register as debug client\n", + client_id_src, client_id_dst); + return -EINVAL; + } + hw_fence_client = (struct msm_hw_fence_client *)client_info_src->client_handle; + + fences_lock = kcalloc(num_fences, sizeof(*fences_lock), GFP_KERNEL); + if (!fences_lock) + return -ENOMEM; + + fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL); + if (!fences) { + kfree(fences_lock); + return -ENOMEM; + } + + /* Create the array of dma fences */ + for (i = 0; i < num_fences; i++) { + struct hw_dma_fence *dma_fence; + + fences_lock[i] = kzalloc(sizeof(spinlock_t), GFP_KERNEL); + if (!fences_lock[i]) { + _cleanup_fences(i, fences, fences_lock); + return -ENOMEM; + } + + dma_fence = kzalloc(sizeof(*dma_fence), GFP_KERNEL); + if (!dma_fence) { + _cleanup_fences(i, fences, fences_lock); + return -ENOMEM; + } + fences[i] = &dma_fence->base; + + spin_lock_init(fences_lock[i]); + dma_fence_init(fences[i], &hw_fence_dbg_ops, fences_lock[i], + client_info_src->dma_context, hw_fence_dbg_seqno + i); + } + + /* create the fence array from array of dma fences */ + fence_array = dma_fence_array_create(num_fences, fences, + client_info_src->dma_context, hw_fence_dbg_seqno + num_fences, 0); + if (!fence_array) { + HWFNC_ERR("Error creating fence_array\n"); + _cleanup_fences(num_fences - 1, fences, fences_lock); + return -EINVAL; + } + + /* create hw fence and write to tx queue for each dma fence */ + for (i = 0; i < num_fences; i++) { + params.fence = fences[i]; + params.handle = &hash; + + ret = msm_hw_fence_create(client_info_src->client_handle, ¶ms); + if (ret) { + HWFNC_ERR("Error creating HW fence\n"); + count = -EINVAL; + goto error; + } + + /* Write to Tx queue */ + hw_fence_update_queue(drv_data, hw_fence_client, client_info_src->dma_context, + hw_fence_dbg_seqno + i, hash, 0, 0, 0, + HW_FENCE_TX_QUEUE - 1); + } + + /* wait on the fence array */ + fence_array_fence = &fence_array->base; + msm_hw_fence_wait_update_v2(client_info_dst->client_handle, &fence_array_fence, NULL, NULL, + 1, 1); + + signal_id = dbg_out_clients_signal_map_no_dpu[client_id_src].ipc_signal_id; + if (signal_id < 0) { + count = -EINVAL; + goto error; + } + + /* write to 
ipcc to trigger the irq */ + tx_client = drv_data->ipcc_client_pid; + rx_client = drv_data->ipcc_client_vid; + hw_fence_ipcc_trigger_signal(drv_data, tx_client, rx_client, signal_id); + + usleep_range(drv_data->debugfs_data.hw_fence_sim_release_delay, + (drv_data->debugfs_data.hw_fence_sim_release_delay + 5)); + +error: + /* this frees the memory for the fence-array and each dma-fence */ + dma_fence_put(&fence_array->base); + + /* + * free array of pointers, no need to call kfree in 'fences', since that is released + * from the fence-array release api + */ + kfree(fences_lock); + + return count; +} + +int process_validation_client_loopback(struct hw_fence_driver_data *drv_data, + int client_id) +{ + struct msm_hw_fence_client *hw_fence_client; + + if (client_id < HW_FENCE_CLIENT_ID_VAL0 || client_id > HW_FENCE_CLIENT_ID_VAL6) { + HWFNC_ERR("invalid client_id: %d min: %d max: %d\n", client_id, + HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_ID_VAL6); + return -EINVAL; + } + + mutex_lock(&drv_data->clients_register_lock); + + if (!drv_data->clients[client_id]) { + mutex_unlock(&drv_data->clients_register_lock); + HWFNC_ERR("Processing workaround for unregistered val client:%d\n", client_id); + return -EINVAL; + } + + hw_fence_client = drv_data->clients[client_id]; + + HWFNC_DBG_IRQ("Processing validation client workaround client_id:%d\n", client_id); + + /* set the atomic flag, to signal the client wait */ + atomic_set(&hw_fence_client->val_signal, 1); + + /* wake-up waiting client */ + wake_up_all(&hw_fence_client->wait_queue); + + mutex_unlock(&drv_data->clients_register_lock); + + return 0; +} + +static long _process_val_signal(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, + struct dma_fence *fence, u64 hash, u64 mask, u32 *error) +{ + struct msm_hw_fence_queue_payload payload; + int read = 1, queue_type = HW_FENCE_RX_QUEUE - 1; /* rx queue index */ + u64 context, seqno; + + /* clear validation signal flag */ + atomic_set(&hw_fence_client->val_signal, 0); + + context = fence ? fence->context : 0; + seqno = fence ? 
fence->seqno : 0; + HWFNC_DBG_L("Client_id:%u attempting to process signalled fence:%llu\n", + hw_fence_client->client_id, hash); + while (read) { + read = hw_fence_read_queue(drv_data, hw_fence_client, &payload, queue_type); + if (read < 0) { + HWFNC_ERR("unable to read client rxq client_id:%u\n", + hw_fence_client->client_id); + break; + } + HWFNC_DBG_L("Client_id: %u rxq read: hash:%llu, flags:%llu, error:%u\n", + hw_fence_client->client_id, payload.hash, payload.flags, payload.error); + if ((fence && payload.ctxt_id == context && payload.seqno == seqno) || + (mask && ((mask & hash) == (mask & payload.hash)))) { + *error = payload.error; + + if (read > 0) { + HWFNC_DBG_L("Client:%d has non-empty rxq, set val_signal flag\n", + hw_fence_client->client_id); + atomic_set(&hw_fence_client->val_signal, 1); + } + + return 0; + } + } + + HWFNC_ERR("fence received: hash:%llu ctx:%llu seq:%llu did not match expected fence\n", + payload.hash, payload.ctxt_id, payload.seqno); + HWFNC_ERR("Client_id:%u fence expected: hash:%llu ctx:%llu seq:%llu\n", + hw_fence_client->client_id, hash, context, seqno); + + return -EINVAL; +} + +int hw_fence_debug_wait_val(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, u64 hash, u64 mask, + u64 timeout_ms, u32 *error) +{ + ktime_t cur_ktime, exp_ktime; + int ret = -EINVAL; + + if (!hw_fence_client || !drv_data) { + HWFNC_ERR("invalid client\n"); + return -EINVAL; + } + + exp_ktime = ktime_add_ms(ktime_get(), timeout_ms); + HWFNC_DBG_L("Client_id:%u attempting to wait on fence:%llu\n", + hw_fence_client->client_id, hash); + while (ret) { + do { + ret = wait_event_timeout(hw_fence_client->wait_queue, + atomic_read(&hw_fence_client->val_signal) > 0, + msecs_to_jiffies(timeout_ms)); + cur_ktime = ktime_get(); + } while ((atomic_read(&hw_fence_client->val_signal) <= 0) && (ret == 0) && + ktime_compare_safe(exp_ktime, cur_ktime) > 0); + + if (!ret) { + HWFNC_ERR("Client_id: %u timed out waiting for the client signal %llu\n", + hw_fence_client->client_id, timeout_ms); + /* Decrement the refcount that hw_sync_get_fence increments */ + dma_fence_put(fence); + return -ETIMEDOUT; + } + ret = _process_val_signal(drv_data, hw_fence_client, fence, hash, mask, error); + /* if val client fails to find expected fence, keep waiting until timeout */ + } + + return ret; +} + +static const struct file_operations hw_fence_reset_client_fops = { + .open = simple_open, + .write = hw_fence_dbg_reset_client_wr, +}; + +static const struct file_operations hw_fence_register_clients_fops = { + .open = simple_open, + .write = hw_fence_dbg_register_clients_wr, +}; + +static const struct file_operations hw_fence_tx_and_signal_clients_fops = { + .open = simple_open, + .write = hw_fence_dbg_tx_and_signal_clients_wr, +}; + +static const struct file_operations hw_fence_create_fops = { + .open = simple_open, + .write = hw_fence_dbg_create_wr, +}; + +static const struct file_operations hw_fence_dump_table_fops = { + .open = simple_open, + .write = hw_fence_dbg_dump_table_wr, + .read = hw_fence_dbg_dump_table_rd, +}; + +static const struct file_operations hw_fence_dump_queues_fops = { + .open = simple_open, + .write = hw_fence_dbg_dump_queues_wr, + .read = hw_fence_dbg_dump_queues_rd, +}; + +static const struct file_operations hw_fence_dump_events_fops = { + .open = simple_open, + .read = hw_fence_dbg_dump_events_rd, +}; + +static const struct file_operations hw_fence_create_join_fence_fops = { + .open = simple_open, + .write = 
hw_fence_dbg_create_join_fence, +}; + +int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data) +{ + struct dentry *debugfs_root; + + debugfs_root = debugfs_create_dir("hw_fence", NULL); + if (IS_ERR_OR_NULL(debugfs_root)) { + HWFNC_ERR("debugfs_root create_dir fail, error %ld\n", + PTR_ERR(debugfs_root)); + drv_data->debugfs_data.root = NULL; + return -EINVAL; + } + + mutex_init(&drv_data->debugfs_data.clients_list_lock); + INIT_LIST_HEAD(&drv_data->debugfs_data.clients_list); + drv_data->debugfs_data.root = debugfs_root; + drv_data->debugfs_data.create_hw_fences = true; + drv_data->debugfs_data.hw_fence_sim_release_delay = 8333; /* uS */ + + debugfs_create_file("ipc_trigger", 0600, debugfs_root, drv_data, + &hw_fence_dbg_ipcc_fops); + debugfs_create_file("dpu_trigger", 0600, debugfs_root, drv_data, + &hw_fence_dbg_ipcc_dpu_fops); + debugfs_create_file("hw_fence_reset_client", 0600, debugfs_root, drv_data, + &hw_fence_reset_client_fops); + debugfs_create_file("hw_fence_register_clients", 0600, debugfs_root, drv_data, + &hw_fence_register_clients_fops); + debugfs_create_file("hw_fence_tx_and_signal", 0600, debugfs_root, drv_data, + &hw_fence_tx_and_signal_clients_fops); + debugfs_create_file("hw_fence_create_join_fence", 0600, debugfs_root, drv_data, + &hw_fence_create_join_fence_fops); + debugfs_create_bool("create_hw_fences", 0600, debugfs_root, + &drv_data->debugfs_data.create_hw_fences); + debugfs_create_u32("sleep_range_us", 0600, debugfs_root, + &drv_data->debugfs_data.hw_fence_sim_release_delay); + debugfs_create_file("hw_fence_create", 0600, debugfs_root, drv_data, + &hw_fence_create_fops); + debugfs_create_u32("hw_fence_debug_level", 0600, debugfs_root, &msm_hw_fence_debug_level); + debugfs_create_file("hw_fence_dump_table", 0600, debugfs_root, drv_data, + &hw_fence_dump_table_fops); + debugfs_create_file("hw_fence_dump_queues", 0600, debugfs_root, drv_data, + &hw_fence_dump_queues_fops); + debugfs_create_file("hw_sync", 0600, debugfs_root, NULL, &hw_sync_debugfs_fops); + debugfs_create_u64("hw_fence_lock_wake_cnt", 0600, debugfs_root, + &drv_data->debugfs_data.lock_wake_cnt); + debugfs_create_file("hw_fence_dump_events", 0600, debugfs_root, drv_data, + &hw_fence_dump_events_fops); + + return 0; +} + +#else +int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data) +{ + return 0; +} +#endif /* CONFIG_DEBUG_FS */ diff --git a/qcom/opensource/mm-drivers/hw_fence/src/hw_fence_drv_interop.c b/qcom/opensource/mm-drivers/hw_fence/src/hw_fence_drv_interop.c new file mode 100644 index 0000000000..e165427dba --- /dev/null +++ b/qcom/opensource/mm-drivers/hw_fence/src/hw_fence_drv_interop.c @@ -0,0 +1,355 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include "msm_hw_fence.h" +#include "hw_fence_drv_priv.h" +#include "hw_fence_drv_debug.h" +#include "hw_fence_drv_interop.h" + +/** + * HW_FENCE_SYNX_FENCE_CLIENT_ID: + * ClientID for fences created to back synx fences + */ +#define HW_FENCE_SYNX_FENCE_CLIENT_ID (~(u32)1) + +/** + * HW_FENCE_SYNX_FENCE_CLIENT_ID: + * ClientID for fences created to back fences with native dma-fence producers + */ +#define HW_FENCE_NATIVE_FENCE_CLIENT_ID (~(u32)2) + +struct synx_hwfence_interops synx_interops = { + .share_handle_status = NULL, + .get_fence = NULL, + .notify_recover = NULL, +}; + +int hw_fence_interop_to_synx_status(int hw_fence_status_code) +{ + int synx_status_code; + + switch (hw_fence_status_code) { + case 0: + synx_status_code = SYNX_SUCCESS; + break; + case -ENOMEM: + synx_status_code = -SYNX_NOMEM; + break; + case -EPERM: + synx_status_code = -SYNX_NOPERM; + break; + case -ETIMEDOUT: + synx_status_code = -SYNX_TIMEOUT; + break; + case -EALREADY: + synx_status_code = -SYNX_ALREADY; + break; + case -ENOENT: + synx_status_code = -SYNX_NOENT; + break; + case -EINVAL: + synx_status_code = -SYNX_INVALID; + break; + case -EBUSY: + synx_status_code = -SYNX_BUSY; + break; + case -EAGAIN: + synx_status_code = -SYNX_EAGAIN; + break; + default: + synx_status_code = hw_fence_status_code; + break; + } + + return synx_status_code; +} + +u32 hw_fence_interop_to_synx_signal_status(u32 flags, u32 error) +{ + u32 status; + + if (!(flags & MSM_HW_FENCE_FLAG_SIGNAL)) { + status = SYNX_STATE_ACTIVE; + goto end; + } + + switch (error) { + case 0: + status = SYNX_STATE_SIGNALED_SUCCESS; + break; + case MSM_HW_FENCE_ERROR_RESET: + status = SYNX_STATE_SIGNALED_SSR; + break; + default: + status = error; + break; + } + +end: + HWFNC_DBG_L("fence flags:%u err:%u status:%u\n", flags, error, status); + + return status; +} + +u32 hw_fence_interop_to_hw_fence_error(u32 status) +{ + u32 error; + + switch (status) { + case SYNX_STATE_INVALID: + HWFNC_ERR("converting error status for invalid fence\n"); + error = SYNX_INVALID; + break; + case SYNX_STATE_ACTIVE: + HWFNC_ERR("converting error status for unsignaled fence\n"); + error = 0; + break; + case SYNX_STATE_SIGNALED_SUCCESS: + error = 0; + break; + case SYNX_STATE_SIGNALED_SSR: + error = MSM_HW_FENCE_ERROR_RESET; + break; + default: + error = status; + break; + } + HWFNC_DBG_L("fence status:%u err:%u\n", status, error); + + return error; +} + +static int _update_interop_fence(struct synx_import_indv_params *params, u64 handle) +{ + u32 signal_status; + int ret, error; + + if (!params->new_h_synx || !synx_interops.share_handle_status) { + HWFNC_ERR("invalid new_h_synx:0x%pK share_handle_status:0x%pK\n", + params->new_h_synx, synx_interops.share_handle_status); + return -EINVAL; + } + + ret = synx_interops.share_handle_status(params, handle, &signal_status); + if (ret || signal_status == SYNX_STATE_INVALID) { + HWFNC_ERR("failed to share handle and signal status handle:%llu ret:%d\n", + handle, ret); + /* destroy reference held by signal*/ + hw_fence_destroy_refcount(hw_fence_drv_data, handle, HW_FENCE_FCTL_REFCOUNT); + + return ret; + } + if (signal_status != SYNX_STATE_ACTIVE) { + error = hw_fence_interop_to_hw_fence_error(signal_status); + ret = hw_fence_signal_fence(hw_fence_drv_data, NULL, handle, error, true); + if (ret) { + HWFNC_ERR("Failed to signal hwfence handle:%llu error:%u\n", handle, error); + return ret; + } + } + + /* store h_synx for debugging purposes */ + ret = hw_fence_update_hsynx(hw_fence_drv_data, handle, 
*params->new_h_synx, false); + if (ret) + HWFNC_ERR("Failed to update hwfence handle:%llu h_synx:%u\n", handle, + *params->new_h_synx); + + return ret; +} + +int hw_fence_interop_create_fence_from_import(struct synx_import_indv_params *params) +{ + struct msm_hw_fence_client dummy_client; + struct dma_fence *fence; + int destroy_ret, ret; + unsigned long flags; + bool is_synx; + u64 handle; + + if (IS_ERR_OR_NULL(params) || IS_ERR_OR_NULL(params->fence)) { + HWFNC_ERR("invalid params:0x%pK fence:0x%pK\n", + params, IS_ERR_OR_NULL(params) ? NULL : params->fence); + return -SYNX_INVALID; + } + + fence = (struct dma_fence *)params->fence; + spin_lock_irqsave(fence->lock, flags); + + /* hw-fence already present, so no need to create new hw-fence */ + if (test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) { + spin_unlock_irqrestore(fence->lock, flags); + return SYNX_SUCCESS; + } + is_synx = test_bit(SYNX_NATIVE_FENCE_FLAG_ENABLED_BIT, &fence->flags); + + /* only synx clients can signal synx fences; no one can signal sw dma-fence from fw */ + dummy_client.client_id = is_synx ? HW_FENCE_SYNX_FENCE_CLIENT_ID : + HW_FENCE_NATIVE_FENCE_CLIENT_ID; + ret = hw_fence_create(hw_fence_drv_data, &dummy_client, (u64)fence, fence->context, + fence->seqno, &handle); + if (ret) { + HWFNC_ERR("failed create fence client:%d ctx:%llu seq:%llu is_synx:%s ret:%d\n", + dummy_client.client_id, fence->context, fence->seqno, + is_synx ? "true" : "false", ret); + spin_unlock_irqrestore(fence->lock, flags); + return hw_fence_interop_to_synx_status(ret); + } + set_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags); + spin_unlock_irqrestore(fence->lock, flags); + + if (is_synx) + /* exchange handles and register fence controller for wait on synx fence */ + ret = _update_interop_fence(params, handle); + else + /* native dma-fences do not have a signaling client, remove ref for fctl signal */ + ret = hw_fence_destroy_refcount(hw_fence_drv_data, handle, HW_FENCE_FCTL_REFCOUNT); + + if (ret) { + HWFNC_ERR("failed to update for signaling client handle:%llu is_synx:%s ret:%d\n", + handle, is_synx ? "true" : "false", ret); + goto error; + } + + ret = hw_fence_add_callback(hw_fence_drv_data, fence, handle); + if (ret) + HWFNC_ERR("failed to add signal callback for fence handle:%llu is_synx:%s ret:%d\n", + handle, is_synx ? "true" : "false", ret); + +error: + /* destroy reference held by creator of fence */ + destroy_ret = hw_fence_destroy_with_hash(hw_fence_drv_data, &dummy_client, + handle); + if (destroy_ret) { + HWFNC_ERR("failed destroy fence client:%d handle:%llu is_synx:%s ret:%d\n", + dummy_client.client_id, handle, is_synx ? 
"true" : "false", ret); + ret = destroy_ret; + } + + return hw_fence_interop_to_synx_status(ret); +} + +int hw_fence_interop_share_handle_status(struct synx_import_indv_params *params, u32 h_synx, + u32 *signal_status) +{ + struct msm_hw_fence *hw_fence; + int destroy_ret, ret = 0; + struct dma_fence *fence; + u64 flags, handle; + bool is_signaled; + u32 error; + + ret = hw_fence_check_hw_fence_driver(hw_fence_drv_data); + if (ret) + return hw_fence_interop_to_synx_status(ret); + + if (!hw_fence_drv_data->fctl_ready) { + HWFNC_ERR("fctl in invalid state, cannot perform operation\n"); + return -SYNX_EAGAIN; + } + + if (IS_ERR_OR_NULL(params) || IS_ERR_OR_NULL(params->new_h_synx) || + !(params->flags & SYNX_IMPORT_DMA_FENCE) || + (params->flags & SYNX_IMPORT_SYNX_FENCE) || IS_ERR_OR_NULL(params->fence)) { + HWFNC_ERR("invalid params:0x%pK h_synx:0x%pK flags:0x%x fence:0x%pK\n", + params, IS_ERR_OR_NULL(params) ? NULL : params->new_h_synx, + IS_ERR_OR_NULL(params) ? 0 : params->flags, + IS_ERR_OR_NULL(params) ? NULL : params->fence); + return -SYNX_INVALID; + } + fence = params->fence; + if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) { + HWFNC_ERR("invalid hwfence ctx:%llu seqno:%llu flags:%lx\n", fence->context, + fence->seqno, fence->flags); + return -SYNX_INVALID; + } + + hw_fence = hw_fence_find_with_dma_fence(hw_fence_drv_data, NULL, fence, &handle, + &is_signaled, false); + + if (is_signaled) { + *signal_status = dma_fence_get_status(fence); + return SYNX_SUCCESS; + } + if (!hw_fence) { + HWFNC_ERR("failed to find hw-fence for ctx:%llu seq:%llu\n", fence->context, + fence->seqno); + return -SYNX_INVALID; + } + + ret = hw_fence_get_flags_error(hw_fence_drv_data, handle, &flags, &error); + if (ret) { + HWFNC_ERR("Failed to get flags and error hwfence handle:%llu\n", handle); + goto end; + } + + *signal_status = hw_fence_interop_to_synx_signal_status(flags, error); + if (*signal_status >= SYNX_STATE_SIGNALED_SUCCESS) + goto end; + + /* update h_synx to register the synx framework as a waiter on the hw-fence */ + ret = hw_fence_update_hsynx(hw_fence_drv_data, handle, h_synx, true); + if (ret) { + HWFNC_ERR("failed to set h_synx for hw-fence handle:%llu\n", handle); + goto end; + } + *params->new_h_synx = (u32)handle; + +end: + /* release reference held to find hw-fence */ + destroy_ret = hw_fence_destroy_with_hash(hw_fence_drv_data, NULL, handle); + if (destroy_ret) { + HWFNC_ERR("Failed to decrement refcount on hw-fence handle:%llu\n", handle); + ret = destroy_ret; + } + + return hw_fence_interop_to_synx_status(ret); +} + +void *hw_fence_interop_get_fence(u32 h_synx) +{ + struct dma_fence *fence; + int ret; + + ret = hw_fence_check_hw_fence_driver(hw_fence_drv_data); + if (ret) + return ERR_PTR(hw_fence_interop_to_synx_status(ret)); + + if (!(h_synx & SYNX_HW_FENCE_HANDLE_FLAG)) { + HWFNC_ERR("invalid h_synx:%u does not have hw-fence handle bit set:%lu\n", + h_synx, SYNX_HW_FENCE_HANDLE_FLAG); + return ERR_PTR(-SYNX_INVALID); + } + + h_synx &= HW_FENCE_HANDLE_INDEX_MASK; + fence = hw_fence_dma_fence_find(hw_fence_drv_data, h_synx, true); + if (!fence) { + HWFNC_ERR("failed to find dma-fence for hw-fence idx:%u\n", h_synx); + return ERR_PTR(-SYNX_INVALID); + } + + return (void *)fence; +} + +int synx_hwfence_init_interops(struct synx_hwfence_interops *synx_ops, + struct synx_hwfence_interops *hwfence_ops) +{ + if (IS_ERR_OR_NULL(synx_ops) || IS_ERR_OR_NULL(hwfence_ops)) { + HWFNC_ERR("invalid params synx_ops:0x%pK hwfence_ops:0x%pK\n", synx_ops, + hwfence_ops); + return 
-EINVAL; + } + + synx_interops.share_handle_status = synx_ops->share_handle_status; + synx_interops.get_fence = synx_ops->get_fence; + synx_interops.notify_recover = synx_ops->notify_recover; + hwfence_ops->share_handle_status = hw_fence_interop_share_handle_status; + hwfence_ops->get_fence = hw_fence_interop_get_fence; + + return 0; +} +EXPORT_SYMBOL_GPL(synx_hwfence_init_interops); diff --git a/qcom/opensource/mm-drivers/hw_fence/src/hw_fence_drv_ipc.c b/qcom/opensource/mm-drivers/hw_fence/src/hw_fence_drv_ipc.c new file mode 100644 index 0000000000..6ebe4ca010 --- /dev/null +++ b/qcom/opensource/mm-drivers/hw_fence/src/hw_fence_drv_ipc.c @@ -0,0 +1,784 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include "hw_fence_drv_priv.h" +#include "hw_fence_drv_utils.h" +#include "hw_fence_drv_ipc.h" +#include "hw_fence_drv_debug.h" + +/* + * Max size of base table with ipc mappings, with one mapping per client type with configurable + * number of subclients + */ +#define HW_FENCE_IPC_MAP_MAX (HW_FENCE_MAX_STATIC_CLIENTS_INDEX + \ + HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE) + +/** + * HW_FENCE_IPCC_MAX_LOOPS: + * Max number of times HW Fence Driver can read interrupt information + */ +#define HW_FENCE_IPCC_MAX_LOOPS 100 + +/** + * struct hw_fence_client_ipc_map - map client id with ipc signal for trigger. + * @ipc_client_id_virt: virtual ipc client id for the hw-fence client. + * @ipc_client_id_phys: physical ipc client id for the hw-fence client. + * @ipc_signal_id: ipc signal id for the hw-fence client. + * @update_rxq: bool to indicate if client requires rx queue update in general signal case + * (e.g. if dma-fence is signaled) + * @signaled_update_rxq: bool to indicate if client requires rx queue update when registering to + * wait on an already signaled fence + * @signaled_send_ipc: bool to indicate if client requires ipc interrupt for already signaled fences + * @txq_update_send_ipc: bool to indicate if client requires ipc interrupt for signaled fences + */ +struct hw_fence_client_ipc_map { + int ipc_client_id_virt; + int ipc_client_id_phys; + int ipc_signal_id; + bool update_rxq; + bool signaled_update_rxq; + bool signaled_send_ipc; + bool txq_update_send_ipc; +}; + +/** + * struct hw_fence_clients_ipc_map - Table makes the 'client to signal' mapping, which is + * used by the hw fence driver to trigger ipc signal when hw fence is already + * signaled. + * This version is for targets that support dpu client id. 
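+ * Each entry is a positional initializer of struct hw_fence_client_ipc_map:
+ * {ipc_client_id_virt, ipc_client_id_phys, ipc_signal_id, update_rxq,
+ * signaled_update_rxq, signaled_send_ipc, txq_update_send_ipc}.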
+ * + * Note that the index of this struct must match the enum hw_fence_client_id + */ +struct hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_IPC_MAP_MAX] = { + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 1, true, true, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0, true, false, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 1, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 2, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 3, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 4, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 5, false, false, true, + false}, +#if IS_ENABLED(CONFIG_DEBUG_FS) + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 21, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 22, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 23, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 24, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27, true, true, false, + true}, +#else + {0, 0, 0, false, false, false, false}, /* val0 */ + {0, 0, 0, false, false, false, false}, /* val1 */ + {0, 0, 0, false, false, false, false}, /* val2 */ + {0, 0, 0, false, false, false, false}, /* val3 */ + {0, 0, 0, false, false, false, false}, /* val4 */ + {0, 0, 0, false, false, false, false}, /* val5 */ + {0, 0, 0, false, false, false, false}, /* val6 */ +#endif /* CONFIG_DEBUG_FS */ + {HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_VID, 0, true, true, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_VID, 0, true, true, true, + false}, +}; + +/** + * struct hw_fence_clients_ipc_map_v2 - Table makes the 'client to signal' mapping, which is + * used by the hw fence driver to trigger ipc signal when hw fence is already + * signaled. + * This version is for targets that support dpu client id and IPC v2. + * + * Note that the index of this struct must match the enum hw_fence_client_id for clients ids less + * than HW_FENCE_MAX_STATIC_CLIENTS_INDEX. + * For clients with configurable sub-clients, the index of this struct matches + * HW_FENCE_MAX_STATIC_CLIENTS_INDEX + (client type index - HW_FENCE_MAX_CLIENT_TYPE_STATIC). 
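+ * For example, the first configurable client type (client type index equal to
+ * HW_FENCE_MAX_CLIENT_TYPE_STATIC) maps to index HW_FENCE_MAX_STATIC_CLIENTS_INDEX,
+ * the next configurable type to HW_FENCE_MAX_STATIC_CLIENTS_INDEX + 1, and so on.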
+ */ +struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_v2[HW_FENCE_IPC_MAP_MAX] = { + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 1, true, true, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_PID, 0, true, false, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 1, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 2, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 3, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 4, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 5, false, false, true, + false}, +#if IS_ENABLED(CONFIG_DEBUG_FS) + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 21, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 22, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 23, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 24, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 25, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 26, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 27, true, true, false, + true}, +#else + {0, 0, 0, false, false, false, false}, /* val0 */ + {0, 0, 0, false, false, false, false}, /* val1 */ + {0, 0, 0, false, false, false, false}, /* val2 */ + {0, 0, 0, false, false, false, false}, /* val3 */ + {0, 0, 0, false, false, false, false}, /* val4 */ + {0, 0, 0, false, false, false, false}, /* val5 */ + {0, 0, 0, false, false, false, false}, /* val6 */ +#endif /* CONFIG_DEBUG_FS */ + {HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_PID, 0, true, true, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_PID, 0, true, true, true, + false}, + {0, 0, 0, false, false, false, false}, /* ipa */ + {HW_FENCE_IPC_CLIENT_ID_IFE0_VID, HW_FENCE_IPC_CLIENT_ID_IFE0_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE1_VID, HW_FENCE_IPC_CLIENT_ID_IFE1_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE2_VID, HW_FENCE_IPC_CLIENT_ID_IFE2_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE3_VID, HW_FENCE_IPC_CLIENT_ID_IFE3_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE4_VID, HW_FENCE_IPC_CLIENT_ID_IFE4_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE5_VID, HW_FENCE_IPC_CLIENT_ID_IFE5_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE6_VID, HW_FENCE_IPC_CLIENT_ID_IFE6_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE7_VID, HW_FENCE_IPC_CLIENT_ID_IFE7_PID, 0, false, false, true, + false}, +}; + +/** + * struct hw_fence_clients_ipc_map_sun - Table makes the 'client to signal' mapping, which is + * used by the hw fence driver to trigger ipc signal when hw fence is already + * signaled. + * This version is for sun target. + * + * Note that the index of this struct must match the enum hw_fence_client_id for clients ids less + * than HW_FENCE_MAX_STATIC_CLIENTS_INDEX. 
+ * For clients with configurable sub-clients, the index of this struct matches + * HW_FENCE_MAX_STATIC_CLIENTS_INDEX + (client type index - HW_FENCE_MAX_CLIENT_TYPE_STATIC). + */ +struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_sun[HW_FENCE_IPC_MAP_MAX] = { + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 0, true, true, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_PID, 0, true, false, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 0, false, false, true, + false}, /* ctl0 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 1, false, false, true, + false}, /* ctl1 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 2, false, false, true, + false}, /* ctl2 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 3, false, false, true, + false}, /* ctl3 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 4, false, false, true, + false}, /* ctl4 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 5, false, false, true, + false}, /* ctl5 */ +#if IS_ENABLED(CONFIG_DEBUG_FS) + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 21, true, true, true, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 22, true, true, true, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 23, true, true, true, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 24, true, true, true, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 25, true, true, true, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 26, true, true, true, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 27, true, true, true, + true}, +#else + {0, 0, 0, false, false, false, false}, /* val0 */ + {0, 0, 0, false, false, false, false}, /* val1 */ + {0, 0, 0, false, false, false, false}, /* val2 */ + {0, 0, 0, false, false, false, false}, /* val3 */ + {0, 0, 0, false, false, false, false}, /* val4 */ + {0, 0, 0, false, false, false, false}, /* val5 */ + {0, 0, 0, false, false, false, false}, /* val6 */ +#endif /* CONFIG_DEBUG_FS */ + {HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_PID_SUN, 0, true, true, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_PID, 0, true, true, true, + false}, + {0, 0, 0, false, false, false, false}, /* ipa */ + {HW_FENCE_IPC_CLIENT_ID_IFE0_VID, HW_FENCE_IPC_CLIENT_ID_IFE0_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE1_VID, HW_FENCE_IPC_CLIENT_ID_IFE1_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE2_VID, HW_FENCE_IPC_CLIENT_ID_IFE2_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE3_VID, HW_FENCE_IPC_CLIENT_ID_IFE3_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE4_VID, HW_FENCE_IPC_CLIENT_ID_IFE4_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE5_VID, HW_FENCE_IPC_CLIENT_ID_IFE5_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE6_VID, HW_FENCE_IPC_CLIENT_ID_IFE6_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE7_VID, HW_FENCE_IPC_CLIENT_ID_IFE7_PID, 0, false, false, true, + false}, +}; + +/** + * struct hw_fence_clients_ipc_map_niobe - Table makes the 'client to signal' mapping, which is + * used by the hw fence driver to trigger ipc signal when hw fence is already + * 
signaled. + * This version is for niobe target. + * + * Note that the index of this struct must match the enum hw_fence_client_id for clients ids less + * than HW_FENCE_MAX_STATIC_CLIENTS_INDEX. + * For clients with configurable sub-clients, the index of this struct matches + * HW_FENCE_MAX_STATIC_CLIENTS_INDEX + (client type index - HW_FENCE_MAX_CLIENT_TYPE_STATIC). + */ +struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_niobe[HW_FENCE_IPC_MAP_MAX] = { + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE, 0, true, true, + true, false}, /* ctrlq */ + {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_PID_NIOBE, 0, true, false, + false, true}, /* gfx */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_NIOBE, 0, false, false, + true, false}, /* ctl0 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_NIOBE, 1, false, false, + true, false}, /* ctl1 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_NIOBE, 2, false, false, + true, false}, /* ctl2 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_NIOBE, 3, false, false, + true, false}, /* ctl3 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_NIOBE, 4, false, false, + true, false}, /* ctl4 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_NIOBE, 5, false, false, + true, false}, /* ctl5 */ +#if IS_ENABLED(CONFIG_DEBUG_FS) + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE, 21, true, true, + true, true}, /* val0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE, 22, true, true, + true, true}, /* val1 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE, 23, true, true, + true, true}, /* val2 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE, 24, true, true, + true, true}, /* val3 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE, 25, true, true, + true, true}, /* val4 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE, 26, true, true, + true, true}, /* val5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE, 27, true, true, + true, true}, /* val6 */ +#else + {0, 0, 0, false, false, false, false}, /* val0 */ + {0, 0, 0, false, false, false, false}, /* val1 */ + {0, 0, 0, false, false, false, false}, /* val2 */ + {0, 0, 0, false, false, false, false}, /* val3 */ + {0, 0, 0, false, false, false, false}, /* val4 */ + {0, 0, 0, false, false, false, false}, /* val5 */ + {0, 0, 0, false, false, false, false}, /* val6 */ +#endif /* CONFIG_DEBUG_FS */ + {HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_PID_NIOBE, 0, true, true, true, + false}, /* ipe */ + {HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_PID_NIOBE, 0, true, true, true, + false}, /* vpu */ + {HW_FENCE_IPC_CLIENT_ID_IPA_VID, HW_FENCE_IPC_CLIENT_ID_IPA_PID_NIOBE, 0, true, true, true, + false}, /* ipa */ + {HW_FENCE_IPC_CLIENT_ID_IFE0_VID, HW_FENCE_IPC_CLIENT_ID_IFE0_PID_NIOBE, 0, false, false, + true, false}, /* ife0 */ + {HW_FENCE_IPC_CLIENT_ID_IFE1_VID, HW_FENCE_IPC_CLIENT_ID_IFE1_PID_NIOBE, 0, false, false, + true, false}, /* ife1 */ + {HW_FENCE_IPC_CLIENT_ID_IFE2_VID, HW_FENCE_IPC_CLIENT_ID_IFE2_PID_NIOBE, 0, false, false, + true, false}, /* ife2 */ + {HW_FENCE_IPC_CLIENT_ID_IFE3_VID, HW_FENCE_IPC_CLIENT_ID_IFE3_PID_NIOBE, 0, false, false, + true, false}, /* ife3 */ + {HW_FENCE_IPC_CLIENT_ID_IFE4_VID, HW_FENCE_IPC_CLIENT_ID_IFE4_PID_NIOBE, 0, false, 
false, + true, false}, /* ife4 */ + {HW_FENCE_IPC_CLIENT_ID_IFE5_VID, HW_FENCE_IPC_CLIENT_ID_IFE5_PID_NIOBE, 0, false, false, + true, false}, /* ife5 */ + {HW_FENCE_IPC_CLIENT_ID_IFE6_VID, HW_FENCE_IPC_CLIENT_ID_IFE6_PID_NIOBE, 0, false, false, + true, false}, /* ife6 */ + {HW_FENCE_IPC_CLIENT_ID_IFE7_VID, HW_FENCE_IPC_CLIENT_ID_IFE7_PID_NIOBE, 0, false, false, + true, false}, /* ife7 */ + {HW_FENCE_IPC_CLIENT_ID_IFE8_VID, HW_FENCE_IPC_CLIENT_ID_IFE8_PID_NIOBE, 0, false, false, + true, false}, /* ife8 */ + {HW_FENCE_IPC_CLIENT_ID_IFE9_VID, HW_FENCE_IPC_CLIENT_ID_IFE9_PID_NIOBE, 0, false, false, + true, false}, /* ife9 */ + {HW_FENCE_IPC_CLIENT_ID_IFE10_VID, HW_FENCE_IPC_CLIENT_ID_IFE10_PID_NIOBE, 0, false, false, + true, false}, /* ife10 */ + {HW_FENCE_IPC_CLIENT_ID_IFE11_VID, HW_FENCE_IPC_CLIENT_ID_IFE11_PID_NIOBE, 0, false, false, + true, false}, /* ife11 */ + +}; + +int hw_fence_ipcc_get_client_virt_id(struct hw_fence_driver_data *drv_data, u32 client_id) +{ + if (!drv_data || client_id >= drv_data->clients_num) + return -EINVAL; + + return drv_data->ipc_clients_table[client_id].ipc_client_id_virt; +} + +int hw_fence_ipcc_get_client_phys_id(struct hw_fence_driver_data *drv_data, u32 client_id) +{ + if (!drv_data || client_id >= drv_data->clients_num) + return -EINVAL; + + return drv_data->ipc_clients_table[client_id].ipc_client_id_phys; +} + +int hw_fence_ipcc_get_signal_id(struct hw_fence_driver_data *drv_data, u32 client_id) +{ + if (!drv_data || client_id >= drv_data->clients_num) + return -EINVAL; + + return drv_data->ipc_clients_table[client_id].ipc_signal_id; +} + +bool hw_fence_ipcc_needs_rxq_update(struct hw_fence_driver_data *drv_data, int client_id) +{ + if (!drv_data || client_id >= drv_data->clients_num) + return false; + + return drv_data->ipc_clients_table[client_id].update_rxq; +} + +bool hw_fence_ipcc_signaled_needs_rxq_update(struct hw_fence_driver_data *drv_data, + int client_id) +{ + if (!drv_data || client_id >= drv_data->clients_num) + return false; + + return drv_data->ipc_clients_table[client_id].signaled_update_rxq; +} + +bool hw_fence_ipcc_signaled_needs_ipc_irq(struct hw_fence_driver_data *drv_data, int client_id) +{ + if (!drv_data || client_id >= drv_data->clients_num) + return false; + + return drv_data->ipc_clients_table[client_id].signaled_send_ipc; +} + +bool hw_fence_ipcc_txq_update_needs_ipc_irq(struct hw_fence_driver_data *drv_data, int client_id) +{ + if (!drv_data || client_id >= drv_data->clients_num) + return false; + + return drv_data->ipc_clients_table[client_id].txq_update_send_ipc; +} + +/** + * _get_ipc_phys_client_name() - Returns ipc client name from its physical id, used for debugging. 
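+ *
+ * Return: printable name of the physical ipcc client id, or "UNKNOWN_PID" if the id is not
+ * recognized.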
+ */ +static inline char *_get_ipc_phys_client_name(u32 client_id) +{ + switch (client_id) { + case HW_FENCE_IPC_CLIENT_ID_APPS_PID: + return "APPS_PID"; + case HW_FENCE_IPC_CLIENT_ID_GPU_PID: + return "GPU_PID"; + case HW_FENCE_IPC_CLIENT_ID_DPU_PID: + return "DPU_PID"; + case HW_FENCE_IPC_CLIENT_ID_IPE_PID: + return "IPE_PID"; + case HW_FENCE_IPC_CLIENT_ID_VPU_PID: + return "VPU_PID"; + case HW_FENCE_IPC_CLIENT_ID_IFE0_PID: + return "IFE0_PID"; + case HW_FENCE_IPC_CLIENT_ID_IFE1_PID: + return "IFE1_PID"; + case HW_FENCE_IPC_CLIENT_ID_IFE2_PID: + return "IFE2_PID"; + case HW_FENCE_IPC_CLIENT_ID_IFE3_PID: + return "IFE3_PID"; + case HW_FENCE_IPC_CLIENT_ID_IFE4_PID: + return "IFE4_PID"; + case HW_FENCE_IPC_CLIENT_ID_IFE5_PID: + return "IFE5_PID"; + case HW_FENCE_IPC_CLIENT_ID_IFE6_PID: + return "IFE6_PID"; + case HW_FENCE_IPC_CLIENT_ID_IFE7_PID: + return "IFE7_PID"; + } + + return "UNKNOWN_PID"; +} + +/** + * _get_ipc_virt_client_name() - Returns ipc client name from its virtual id, used for debugging. + */ +static inline char *_get_ipc_virt_client_name(u32 client_id) +{ + switch (client_id) { + case HW_FENCE_IPC_CLIENT_ID_APPS_VID: + return "APPS_VID"; + case HW_FENCE_IPC_CLIENT_ID_GPU_VID: + return "GPU_VID"; + case HW_FENCE_IPC_CLIENT_ID_DPU_VID: + return "DPU_VID"; + case HW_FENCE_IPC_CLIENT_ID_IPE_VID: + return "IPE_VID"; + case HW_FENCE_IPC_CLIENT_ID_VPU_VID: + return "VPU_VID"; + case HW_FENCE_IPC_CLIENT_ID_IFE0_VID: + return "IFE0_VID"; + case HW_FENCE_IPC_CLIENT_ID_IFE1_VID: + return "IFE1_VID"; + case HW_FENCE_IPC_CLIENT_ID_IFE2_VID: + return "IFE2_VID"; + case HW_FENCE_IPC_CLIENT_ID_IFE3_VID: + return "IFE3_VID"; + case HW_FENCE_IPC_CLIENT_ID_IFE4_VID: + return "IFE4_VID"; + case HW_FENCE_IPC_CLIENT_ID_IFE5_VID: + return "IFE5_VID"; + case HW_FENCE_IPC_CLIENT_ID_IFE6_VID: + return "IFE6_VID"; + case HW_FENCE_IPC_CLIENT_ID_IFE7_VID: + return "IFE7_VID"; + } + + return "UNKNOWN_VID"; +} + +void hw_fence_ipcc_trigger_signal(struct hw_fence_driver_data *drv_data, + u32 tx_client_pid, u32 rx_client_vid, u32 signal_id) +{ + void __iomem *ptr; + u32 val; + + /* Send signal */ + ptr = IPC_PROTOCOLp_CLIENTc_SEND(drv_data->ipcc_io_mem, drv_data->protocol_id, + tx_client_pid); + val = (rx_client_vid << 16) | signal_id; + + HWFNC_DBG_IRQ("Sending ipcc from %s (%d) to %s (%d) signal_id:%d [wr:0x%x to off:0x%pK]\n", + _get_ipc_phys_client_name(tx_client_pid), tx_client_pid, + _get_ipc_virt_client_name(rx_client_vid), rx_client_vid, + signal_id, val, ptr); + HWFNC_DBG_H("Write:0x%x to RegOffset:0x%pK\n", val, ptr); + writel_relaxed(val, ptr); + + /* Make sure value is written */ + wmb(); +} + +static int _hw_fence_ipcc_init_map_with_configurable_clients(struct hw_fence_driver_data *drv_data, + struct hw_fence_client_ipc_map *base_table) +{ + int i, j, map_idx; + size_t size; + + size = drv_data->clients_num * sizeof(struct hw_fence_client_ipc_map); + drv_data->ipc_clients_table = kzalloc(size, GFP_KERNEL); + + if (!drv_data->ipc_clients_table) + return -ENOMEM; + + /* copy mappings for static hw fence clients */ + size = HW_FENCE_MAX_STATIC_CLIENTS_INDEX * sizeof(struct hw_fence_client_ipc_map); + memcpy(drv_data->ipc_clients_table, base_table, size); + + /* initialize mappings for ipc clients with configurable number of hw fence clients */ + map_idx = HW_FENCE_MAX_STATIC_CLIENTS_INDEX; + for (i = 0; i < HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE; i++) { + int client_type = HW_FENCE_MAX_CLIENT_TYPE_STATIC + i; + int clients_num = drv_data->hw_fence_client_types[client_type].clients_num; + + for (j = 
0; j < clients_num; j++) { + /* this should never happen if drv_data->clients_num is correct */ + if (map_idx >= drv_data->clients_num) { + HWFNC_ERR("%s clients_num:%d exceeds drv_data->clients_num:%u\n", + drv_data->hw_fence_client_types[client_type].name, + clients_num, drv_data->clients_num); + return -EINVAL; + } + drv_data->ipc_clients_table[map_idx] = + base_table[HW_FENCE_MAX_STATIC_CLIENTS_INDEX + i]; + drv_data->ipc_clients_table[map_idx].ipc_signal_id = j; + map_idx++; + } + } + + return 0; +} + +/** + * _hw_fence_ipcc_hwrev_init() - Initializes internal driver struct with corresponding ipcc data, + * according to the ipcc hw revision. + * @drv_data: driver data. + * @hwrev: ipcc hw revision. + */ +static int _hw_fence_ipcc_hwrev_init(struct hw_fence_driver_data *drv_data, u32 hwrev) +{ + int ret = 0; + + switch (hwrev) { + case HW_FENCE_IPCC_HW_REV_170: + drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; + drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; + drv_data->ipcc_fctl_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; + drv_data->ipcc_fctl_pid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; + drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_KALAMA; + drv_data->ipc_clients_table = hw_fence_clients_ipc_map; + HWFNC_DBG_INIT("ipcc protocol_id: Kalama\n"); + break; + case HW_FENCE_IPCC_HW_REV_203: + drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; + drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_PID; + drv_data->ipcc_fctl_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; + drv_data->ipcc_fctl_pid = HW_FENCE_IPC_CLIENT_ID_APPS_PID; + drv_data->protocol_id = HW_FENCE_IPC_FENCE_PROTOCOL_ID_PINEAPPLE; /* Fence */ + ret = _hw_fence_ipcc_init_map_with_configurable_clients(drv_data, + hw_fence_clients_ipc_map_v2); + HWFNC_DBG_INIT("ipcc protocol_id: Pineapple\n"); + break; + case HW_FENCE_IPCC_HW_REV_2A2: + drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; + drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_PID; + drv_data->ipcc_fctl_vid = drv_data->has_soccp ? HW_FENCE_IPC_CLIENT_ID_SOCCP_VID : + HW_FENCE_IPC_CLIENT_ID_APPS_VID; + drv_data->ipcc_fctl_pid = drv_data->has_soccp ? HW_FENCE_IPC_CLIENT_ID_SOCCP_PID : + HW_FENCE_IPC_CLIENT_ID_APPS_PID; + drv_data->protocol_id = HW_FENCE_IPC_FENCE_PROTOCOL_ID_SUN; /* Fence */ + ret = _hw_fence_ipcc_init_map_with_configurable_clients(drv_data, + hw_fence_clients_ipc_map_sun); + HWFNC_DBG_INIT("ipcc protocol_id: Sun\n"); + break; + case HW_FENCE_IPCC_HW_REV_2B4: + drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; + drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE; + drv_data->ipcc_fctl_vid = drv_data->has_soccp ? HW_FENCE_IPC_CLIENT_ID_SOCCP_VID : + HW_FENCE_IPC_CLIENT_ID_APPS_VID; + drv_data->ipcc_fctl_pid = drv_data->has_soccp ? 
+ HW_FENCE_IPC_CLIENT_ID_SOCCP_PID_NIOBE : + HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE; + drv_data->protocol_id = HW_FENCE_IPC_FENCE_PROTOCOL_ID_NIOBE; /* Fence */ + ret = _hw_fence_ipcc_init_map_with_configurable_clients(drv_data, + hw_fence_clients_ipc_map_niobe); + HWFNC_DBG_INIT("ipcc protocol_id: Niobe\n"); + break; + default: + HWFNC_ERR("unrecognized ipcc hw-rev:0x%x\n", hwrev); + return -1; + } + + return ret; +} + +static int _enable_client_signal_pair(struct hw_fence_driver_data *drv_data, + u32 rx_client_id_phys, u32 tx_client_id_vid, u32 signal_id) +{ + void __iomem *ptr; + u32 val; + + if (!drv_data || !drv_data->ipcc_io_mem || !drv_data->protocol_id) { + HWFNC_ERR("invalid drv_data:0x%pK ipcc_io_mem:0x%pK protocol:%d\n", + drv_data, drv_data ? drv_data->ipcc_io_mem : NULL, + drv_data ? drv_data->protocol_id : -1); + return -EINVAL; + } + + val = ((tx_client_id_vid) << 16) | ((signal_id) & 0xFFFF); + ptr = IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_ENABLE(drv_data->ipcc_io_mem, drv_data->protocol_id, + rx_client_id_phys); + HWFNC_DBG_H("Write:0x%x to RegOffset:0x%pK\n", val, ptr); + writel_relaxed(val, ptr); + + return 0; +} + +int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data) +{ + u32 val; + int ret; + + HWFNC_DBG_H("enable ipc +\n"); + + ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-ipc-ver", &val); + if (ret || !val) { + HWFNC_ERR("missing hw fences ipc-ver entry or invalid ret:%d val:%d\n", ret, val); + return -EINVAL; + } + + if (_hw_fence_ipcc_hwrev_init(drv_data, val)) { + HWFNC_ERR("ipcc protocol id not supported\n"); + return -EINVAL; + } + + /* Enable protocol for ctrl queue */ + hw_fence_ipcc_enable_protocol(drv_data, 0); + + /* Enable Client-Signal pairs from FCTL (SOCCP or APSS(NS)) to APPS(NS) (0x8) */ + ret = _enable_client_signal_pair(drv_data, drv_data->ipcc_client_pid, + drv_data->ipcc_fctl_vid, 0); + + HWFNC_DBG_H("enable ipc -\n"); + + return 0; +} + +int hw_fence_ipcc_enable_protocol(struct hw_fence_driver_data *drv_data, u32 client_id) +{ + void __iomem *ptr; + u32 val; + + if (!drv_data || !drv_data->protocol_id || !drv_data->ipc_clients_table || + client_id >= drv_data->clients_num) { + HWFNC_ERR("drv_data:0x%pK protocol:%d ipc_table:0x%pK client_id:%u max:%u\n", + drv_data, drv_data ? drv_data->protocol_id : -1, + drv_data ? drv_data->ipc_clients_table : NULL, client_id, + drv_data ? drv_data->clients_num : -1); + return -EINVAL; + } + + /* Sets bit(1) to clear when RECV_ID is read */ + val = 0x00000001; + ptr = IPC_PROTOCOLp_CLIENTc_CONFIG(drv_data->ipcc_io_mem, drv_data->protocol_id, + drv_data->ipc_clients_table[client_id].ipc_client_id_phys); + HWFNC_DBG_H("Write:0x%x to RegOffset:0x%llx\n", val, (u64)ptr); + writel_relaxed(val, ptr); + + return 0; +} + +int hw_fence_ipcc_enable_client_signal_pairs(struct hw_fence_driver_data *drv_data, + u32 start_client) +{ + struct hw_fence_client_ipc_map *hw_fence_client; + int i, ipc_client_vid; + + HWFNC_DBG_H("enable ipc for client signal pairs +\n"); + + if (!drv_data || !drv_data->protocol_id || !drv_data->ipc_clients_table || + start_client >= drv_data->clients_num) { + HWFNC_ERR("drv_data:0x%pK protocol:%d ipc_table:0x%pK start_client:%u max:%u\n", + drv_data, drv_data ? drv_data->protocol_id : -1, + drv_data ? drv_data->ipc_clients_table : NULL, start_client, + drv_data ? 
drv_data->clients_num : -1); + return -EINVAL; + } + ipc_client_vid = drv_data->ipc_clients_table[start_client].ipc_client_id_virt; + + HWFNC_DBG_H("ipcc_io_mem:0x%llx\n", (u64)drv_data->ipcc_io_mem); + + HWFNC_DBG_H("Initialize %s ipc signals\n", _get_ipc_virt_client_name(ipc_client_vid)); + /* Enable Client-Signal pairs from Client to APPS(NS) (8) */ + for (i = start_client; i < drv_data->clients_num; i++) { + hw_fence_client = &drv_data->ipc_clients_table[i]; + + /* + * Stop after enabling signals for all clients with the same ipcc client id as the + * given client. + */ + if (hw_fence_client->ipc_client_id_virt != ipc_client_vid) + break; + + /* Enable signals for given client */ + HWFNC_DBG_H("%s client:%d vid:%d pid:%d signal:%d has_soccp:%d\n", + _get_ipc_virt_client_name(ipc_client_vid), i, + hw_fence_client->ipc_client_id_virt, hw_fence_client->ipc_client_id_phys, + hw_fence_client->ipc_signal_id, drv_data->has_soccp); + + /* Enable input signal from driver to client */ + if (drv_data->has_soccp || ipc_client_vid != drv_data->ipcc_client_vid) + _enable_client_signal_pair(drv_data, hw_fence_client->ipc_client_id_phys, + drv_data->ipcc_client_vid, hw_fence_client->ipc_signal_id); + + /* If fctl separate from driver, enable separate input fctl-signal for client */ + if (drv_data->ipcc_client_vid != drv_data->ipcc_fctl_vid) + _enable_client_signal_pair(drv_data, hw_fence_client->ipc_client_id_phys, + drv_data->ipcc_fctl_vid, hw_fence_client->ipc_signal_id); + } + + HWFNC_DBG_H("enable %s ipc for start:%d end:%d -\n", + _get_ipc_virt_client_name(ipc_client_vid), start_client, i); + + return 0; +} + +static bool _is_invalid_signaling_client(struct hw_fence_driver_data *drv_data, u32 client_id) +{ +#if IS_ENABLED(CONFIG_DEBUG_FS) + return client_id != drv_data->ipcc_fctl_vid && client_id != drv_data->ipcc_client_vid; +#else + return client_id != drv_data->ipcc_fctl_vid; +#endif +} + +u64 hw_fence_ipcc_get_signaled_clients_mask(struct hw_fence_driver_data *drv_data) +{ + u32 client_id, signal_id, reg_val; + u64 mask = 0; + int i; + + if (!drv_data || !drv_data->protocol_id || !drv_data->ipcc_client_pid || + !drv_data->ipcc_fctl_vid || !drv_data->has_soccp) { + HWFNC_ERR("invalid drv_data:0x%pK protocol:%d drv_pid:%d fctl_vid:%d\n", + drv_data, drv_data ? drv_data->protocol_id : -1, + drv_data ? drv_data->ipcc_client_pid : -1, + drv_data ? 
drv_data->ipcc_fctl_vid : -1); + return -1; + } + + /* read recv_id until done processing all clients signals */ + for (i = 0; i < HW_FENCE_IPCC_MAX_LOOPS; i++) { + mb(); /* make sure memory is updated */ + reg_val = readl_relaxed(IPC_PROTOCOLp_CLIENTc_RECV_ID(drv_data->ipcc_io_mem, + drv_data->protocol_id, drv_data->ipcc_client_pid)); + + /* finished reading clients */ + if (reg_val == HW_FENCE_IPC_RECV_ID_NONE) + return mask; + + client_id = (reg_val >> 16) & 0xFFFF; + signal_id = reg_val & 0xFFFF; + HWFNC_DBG_IRQ("read recv_id value:0x%x client:%u signal:%u\n", reg_val, client_id, + signal_id); + + if (_is_invalid_signaling_client(drv_data, client_id)) { + HWFNC_ERR("Received client:%u signal:%u expected client:%u\n", + client_id, signal_id, drv_data->ipcc_fctl_vid); + continue; + } + +#if IS_ENABLED(CONFIG_DEBUG_FS) + /* received signals from SOCCP for validation clients */ + if (signal_id >= hw_fence_ipcc_get_signal_id(drv_data, HW_FENCE_CLIENT_ID_VAL0) + && signal_id <= hw_fence_ipcc_get_signal_id(drv_data, + HW_FENCE_CLIENT_ID_VAL6)) + signal_id = signal_id - hw_fence_ipcc_get_signal_id(drv_data, + HW_FENCE_CLIENT_ID_VAL0) + HW_FENCE_CLIENT_ID_VAL0; +#endif /* CONFIG_DEBUG_FS*/ + + mask |= BIT(signal_id); + } + + HWFNC_ERR("irq_handler has too many loops i=%d max:%d\n", i, HW_FENCE_IPCC_MAX_LOOPS); + + return mask; +} diff --git a/qcom/opensource/mm-drivers/hw_fence/src/hw_fence_drv_priv.c b/qcom/opensource/mm-drivers/hw_fence/src/hw_fence_drv_priv.c new file mode 100644 index 0000000000..23f868fd0b --- /dev/null +++ b/qcom/opensource/mm-drivers/hw_fence/src/hw_fence_drv_priv.c @@ -0,0 +1,2627 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include + +#include "hw_fence_drv_priv.h" +#include "hw_fence_drv_utils.h" +#include "hw_fence_drv_ipc.h" +#include "hw_fence_drv_debug.h" +#include "hw_fence_drv_fence.h" +#if IS_ENABLED(CONFIG_QTI_HW_FENCE_USE_SYNX) +#include +#else +#define SYNX_HW_FENCE_HANDLE_FLAG 0 +#define SYNX_STATE_SIGNALED_CANCEL 4 +#endif /* CONFIG_QTI_HW_FENCE_USE_SYNX */ + +/* Global atomic lock */ +#define GLOBAL_ATOMIC_STORE(drv_data, lock, val) global_atomic_store(drv_data, lock, val) + +#define IS_HW_FENCE_TX_QUEUE(queue_type) ((queue_type) == HW_FENCE_TX_QUEUE - 1) + +#define REQUIRES_IDX_TRANSLATION(queue) \ + ((queue)->rd_wr_idx_factor && ((queue)->rd_wr_idx_start || (queue)->rd_wr_idx_factor > 1)) + +#define IDX_TRANSLATE_CUSTOM_TO_DEFAULT(queue, idx) \ + (((idx) - (queue)->rd_wr_idx_start) * (queue)->rd_wr_idx_factor) + +#define IDX_TRANSLATE_DEFAULT_TO_CUSTOM(queue, idx) \ + (((idx) / (queue)->rd_wr_idx_factor) + (queue)->rd_wr_idx_start) + +/* number of fences searched for HW Fence import */ +#define HW_FENCE_FIND_THRESHOLD 10 + +/* + * Iterates through the hw-fence table populating hash and hw_fence pointers accordingly. + * Note: This internally takes the hw-fence lock during iteration so this loop must be + * exited by setting found = true. 
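+ * The loop body must therefore report completion through 'found' rather than break or return
+ * early, because the per-entry lock taken by the iterator is only released on the following
+ * iteration step.
+ *
+ * Rough usage sketch (modeled on _hw_fence_lookup_and_create_range() below; entry_matches()
+ * and process_entry() are illustrative names only). The entry pointed to by hfence may be
+ * processed inside the body while its lock is held:
+ *
+ *   for_each_hw_fence(drv_data, &hfence, hash, ctx, seqno, 0, end, step, found) {
+ *       if (entry_matches(hfence)) {
+ *           process_entry(hfence);
+ *           found = true;
+ *       }
+ *   }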
+ */ +#define for_each_hw_fence(drv_data, hfence, hash, ctx, seqno, start, end, i, found) \ + for ((i) = _hw_fence_iterator_init((drv_data), (hfence), (hash), (ctx), (seqno), \ + (start), (end)); \ + ((i) < (end)) && !(found); \ + (i) = _hw_fence_iterator_next((drv_data), (hfence), (hash), (i), (end), (found))) + +inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data) +{ +#ifdef HWFENCE_USE_SLEEP_TIMER + return readl_relaxed(drv_data->qtime_io_mem); +#else /* USE QTIMER */ + return arch_timer_read_counter(); +#endif /* HWFENCE_USE_SLEEP_TIMER */ +} + +/* on targets with soccp, read_index and write_index etc. fields are in different locations */ +void hw_fence_get_queue_idx_ptrs(struct hw_fence_driver_data *drv_data, void *va_header, + u32 **rd_idx_ptr, u32 **wr_idx_ptr, u32 **tx_wm_ptr) +{ + struct msm_hw_fence_hfi_queue_header *hfi_header; + struct msm_hw_fence_hfi_queue_header_v2 *hfi_header_v2; + + /* if soccp is present, use v2 header data structures */ + if (drv_data->has_soccp) { + hfi_header_v2 = va_header; + *rd_idx_ptr = &hfi_header_v2->read_index; + *wr_idx_ptr = &hfi_header_v2->write_index; + if (tx_wm_ptr) + *tx_wm_ptr = &hfi_header_v2->tx_wm; + } else { + hfi_header = va_header; + *rd_idx_ptr = &hfi_header->read_index; + *wr_idx_ptr = &hfi_header->write_index; + if (tx_wm_ptr) + *tx_wm_ptr = &hfi_header->tx_wm; + } +} + +static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, + enum hw_fence_mem_reserve mem_reserve_id, + struct msm_hw_fence_mem_addr *mem_descriptor, + struct msm_hw_fence_queue *queues, int queues_num, + int client_id) +{ + struct msm_hw_fence_hfi_queue_table_header *hfi_table_header; + struct msm_hw_fence_hfi_queue_header *hfi_queue_header; + struct hw_fence_client_type_desc *desc; + void *ptr, *qptr; + phys_addr_t phys, qphys; + u32 size, start_queue_offset, txq_idx_start = 0, txq_idx_factor = 1; + u32 *wr_idx_ptr, *rd_idx_ptr, *tx_wm_ptr; + int headers_size, queue_size, payload_size; + int start_padding = 0, end_padding = 0; + int i, ret = 0; + bool skip_txq_wr_idx = false; + + HWFNC_DBG_INIT("mem_reserve_id:%d client_id:%d\n", mem_reserve_id, client_id); + switch (mem_reserve_id) { + case HW_FENCE_MEM_RESERVE_CTRL_QUEUE: + headers_size = HW_FENCE_HFI_CTRL_HEADERS_SIZE(drv_data->has_soccp); + queue_size = drv_data->hw_fence_ctrl_queue_size; + payload_size = HW_FENCE_CTRL_QUEUE_PAYLOAD; + break; + case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: + if (client_id >= drv_data->clients_num || + !drv_data->hw_fence_client_queue_size[client_id].type) { + HWFNC_ERR("Invalid client_id:%d for clients_num:%u\n", client_id, + drv_data->clients_num); + return -EINVAL; + } + + desc = drv_data->hw_fence_client_queue_size[client_id].type; + start_padding = desc->start_padding; + end_padding = desc->end_padding; + headers_size = HW_FENCE_HFI_CLIENT_HEADERS_SIZE(queues_num, drv_data->has_soccp) + + start_padding + end_padding; + queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * desc->queue_entries; + payload_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD; + txq_idx_start = desc->txq_idx_start; + txq_idx_factor = desc->txq_idx_factor ? 
desc->txq_idx_factor : 1; + skip_txq_wr_idx = desc->skip_txq_wr_idx; + break; + default: + HWFNC_ERR("Unexpected mem reserve id: %d\n", mem_reserve_id); + return -EINVAL; + } + + /* Reserve Virtual and Physical memory for HFI headers */ + ret = hw_fence_utils_reserve_mem(drv_data, mem_reserve_id, &phys, &ptr, &size, client_id); + if (ret) { + HWFNC_ERR("Failed to reserve id:%d client %d\n", mem_reserve_id, client_id); + return -ENOMEM; + } + HWFNC_DBG_INIT("phys:0x%llx ptr:0x%pK size:%d\n", phys, ptr, size); + + /* Populate Memory descriptor with address */ + mem_descriptor->virtual_addr = ptr; + mem_descriptor->device_addr = phys; + mem_descriptor->size = size; /* bytes */ + mem_descriptor->mem_data = NULL; /* Currently we don't need any special info */ + + HWFNC_DBG_INIT("Initialize headers: headers_size:%d start_padding:%d end_padding:%d\n", + headers_size, start_padding, end_padding); + /* Initialize headers info within hfi memory */ + hfi_table_header = (struct msm_hw_fence_hfi_queue_table_header *)ptr; + hfi_table_header->version = 0; + hfi_table_header->size = size; /* bytes */ + /* Offset, from the Base Address, where the first queue header starts */ + hfi_table_header->qhdr0_offset = HW_FENCE_HFI_TABLE_HEADER_SIZE(drv_data->has_soccp) + + start_padding; + hfi_table_header->qhdr_size = HW_FENCE_HFI_QUEUE_HEADER_SIZE(drv_data->has_soccp); + hfi_table_header->num_q = queues_num; /* number of queues */ + hfi_table_header->num_active_q = queues_num; + + /* Initialize Queues Info within HFI memory */ + + /* + * Calculate offset where hfi queue header starts, which it is at the + * end of the hfi table header + */ + HWFNC_DBG_INIT("Initialize queues\n"); + hfi_queue_header = (struct msm_hw_fence_hfi_queue_header *) + ((char *)ptr + hfi_table_header->qhdr0_offset); + for (i = 0; i < queues_num; i++) { + HWFNC_DBG_INIT("init queue[%d]\n", i); + + /* Calculate the offset where the Queue starts */ + start_queue_offset = headers_size + (i * queue_size); /* Bytes */ + qphys = phys + start_queue_offset; /* start of the PA for the queue elems */ + qptr = (char *)ptr + start_queue_offset; /* start of the va for queue elems */ + + /* Set the physical start address in the HFI queue header */ + hfi_queue_header->start_addr = qphys; + + /* Set the queue type (i.e. RX or TX queue) */ + hfi_queue_header->type = IS_HW_FENCE_TX_QUEUE(i) ? HW_FENCE_TX_QUEUE : + HW_FENCE_RX_QUEUE; + + /* Set the size of this header */ + hfi_queue_header->queue_size = queue_size; + + /* Set the payload size */ + hfi_queue_header->pkt_size = payload_size; + + hw_fence_get_queue_idx_ptrs(drv_data, hfi_queue_header, &rd_idx_ptr, &wr_idx_ptr, + &tx_wm_ptr); + + /* Set write index for clients' tx queues that index from nonzero value */ + if (txq_idx_start && IS_HW_FENCE_TX_QUEUE(i) && !*wr_idx_ptr) { + if (skip_txq_wr_idx) + *tx_wm_ptr = txq_idx_start; + *rd_idx_ptr = txq_idx_start; + *wr_idx_ptr = txq_idx_start; + HWFNC_DBG_INIT("init:TX_QUEUE client:%d rd_idx=%s=%u\n", client_id, + skip_txq_wr_idx ? "wr_idx=tx_wm" : "wr_idx", + txq_idx_start); + } + + /* Update memory for hfi_queue_header */ + wmb(); + + /* Store Memory info in the Client data */ + queues[i].va_queue = qptr; + queues[i].pa_queue = qphys; + queues[i].va_header = hfi_queue_header; + queues[i].q_size_bytes = queue_size; + HWFNC_DBG_INIT("init:%s client:%d q[%d] va=0x%pK pa=0x%llx hd:0x%pK sz:%u pkt:%d\n", + hfi_queue_header->type == HW_FENCE_TX_QUEUE ? 
"TX_QUEUE" : "RX_QUEUE", + client_id, i, queues[i].va_queue, queues[i].pa_queue, queues[i].va_header, + queues[i].q_size_bytes, payload_size); + + /* Store additional tx queue rd_wr_idx properties */ + if (IS_HW_FENCE_TX_QUEUE(i)) { + queues[i].rd_wr_idx_start = txq_idx_start; + queues[i].rd_wr_idx_factor = txq_idx_factor; + queues[i].skip_wr_idx = skip_txq_wr_idx; + } else { + queues[i].rd_wr_idx_factor = 1; + } + HWFNC_DBG_INIT("rd_wr_idx_start:%u rd_wr_idx_factor:%u skip_wr_idx:%s\n", + queues[i].rd_wr_idx_start, queues[i].rd_wr_idx_factor, + queues[i].skip_wr_idx ? "true" : "false"); + + /* Next header */ + hfi_queue_header = (struct msm_hw_fence_hfi_queue_header *) + ((char *)hfi_queue_header + hfi_table_header->qhdr_size); + } + + return ret; +} + +static inline bool _lock_client_queue(int queue_type) +{ + /* Only lock Rx Queue */ + return (queue_type == (HW_FENCE_RX_QUEUE - 1)) ? true : false; +} + +char *_get_queue_type(int queue_type) +{ + return (queue_type == (HW_FENCE_RX_QUEUE - 1)) ? "RXQ" : "TXQ"; +} + +static void _translate_queue_indexes_custom_to_default(struct msm_hw_fence_queue *queue, + u32 *read_idx, u32 *write_idx) +{ + if (REQUIRES_IDX_TRANSLATION(queue)) { + *read_idx = IDX_TRANSLATE_CUSTOM_TO_DEFAULT(queue, *read_idx); + *write_idx = IDX_TRANSLATE_CUSTOM_TO_DEFAULT(queue, *write_idx); + HWFNC_DBG_Q("rd_idx_u32:%u wr_idx_u32:%u rd_wr_idx start:%u factor:%u\n", + *read_idx, *write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor); + } +} + +int hw_fence_read_queue(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence_queue_payload *payload, + int queue_type) +{ + struct msm_hw_fence_queue *queue; + + if (queue_type >= HW_FENCE_CLIENT_QUEUES || !hw_fence_client || !payload) { + HWFNC_ERR("Invalid queue type:%d hw_fence_client:0x%pK payload:0x%pK\n", queue_type, + hw_fence_client, payload); + return -EINVAL; + } + + queue = &hw_fence_client->queues[queue_type]; + HWFNC_DBG_Q("read client:%d queue:0x%pK\n", hw_fence_client->client_id, queue); + + return hw_fence_read_queue_helper(drv_data, queue, payload); +} + +int hw_fence_read_queue_helper(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_queue *queue, struct msm_hw_fence_queue_payload *payload) +{ + u32 read_idx, write_idx, to_read_idx; + u32 *read_ptr, *rd_idx_ptr, *wr_idx_ptr; + u32 payload_size_u32, q_size_u32; + struct msm_hw_fence_queue_payload *read_ptr_payload; + + q_size_u32 = (queue->q_size_bytes / sizeof(u32)); + payload_size_u32 = (sizeof(struct msm_hw_fence_queue_payload) / sizeof(u32)); + HWFNC_DBG_Q("sizeof payload:%lu\n", sizeof(struct msm_hw_fence_queue_payload)); + + if (!queue->va_header || !payload) { + HWFNC_ERR("Invalid queue\n"); + return -EINVAL; + } + + hw_fence_get_queue_idx_ptrs(drv_data, queue->va_header, &rd_idx_ptr, &wr_idx_ptr, NULL); + + /* Make sure data is ready before read */ + mb(); + + /* Get read and write index */ + read_idx = readl_relaxed(rd_idx_ptr); + write_idx = readl_relaxed(wr_idx_ptr); + + /* translate read and write indexes from custom indexing to dwords with no offset */ + _translate_queue_indexes_custom_to_default(queue, &read_idx, &write_idx); + + HWFNC_DBG_Q("read rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d queue:0x%pK\n", + rd_idx_ptr, wr_idx_ptr, read_idx, write_idx, queue); + + if (read_idx == write_idx) { + HWFNC_DBG_Q("Nothing to read!\n"); + return -EINVAL; + } + + /* Move the pointer where we need to read and cast it */ + read_ptr = ((u32 *)queue->va_queue + read_idx); + read_ptr_payload = 
(struct msm_hw_fence_queue_payload *)read_ptr; + HWFNC_DBG_Q("read_ptr:0x%pK queue: va=0x%pK pa=0x%llx read_ptr_payload:0x%pK\n", read_ptr, + queue->va_queue, queue->pa_queue, read_ptr_payload); + + /* Calculate the index after the read */ + to_read_idx = read_idx + payload_size_u32; + + /* + * wrap-around case, here we are reading the last element of the queue, therefore set + * to_read_idx, which is the index after the read, to the beginning of the + * queue + */ + if (to_read_idx >= q_size_u32) + to_read_idx = 0; + + /* translate to_read_idx to custom indexing with offset */ + if (REQUIRES_IDX_TRANSLATION(queue)) { + to_read_idx = IDX_TRANSLATE_DEFAULT_TO_CUSTOM(queue, to_read_idx); + HWFNC_DBG_Q("translated to_read_idx:%u rd_wr_idx start:%u factor:%u\n", + to_read_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor); + } + + /* Read the Client Queue */ + *payload = *read_ptr_payload; + + /* update the read index */ + writel_relaxed(to_read_idx, rd_idx_ptr); + + /* update memory for the index */ + wmb(); + + /* Return one if queue still has contents after read */ + return to_read_idx == write_idx ? 0 : 1; +} + +static int _get_update_queue_params(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_queue *queue, u32 *q_size_u32, u32 *payload_size, + u32 *payload_size_u32, u32 **rd_idx_ptr, u32 **wr_ptr) +{ + u32 *tx_wm_ptr; + + if (!queue || !queue->va_header) { + HWFNC_ERR("invalid queue\n"); + return -EINVAL; + } + + *q_size_u32 = (queue->q_size_bytes / sizeof(u32)); + *payload_size = sizeof(struct msm_hw_fence_queue_payload); + *payload_size_u32 = (*payload_size / sizeof(u32)); + + hw_fence_get_queue_idx_ptrs(drv_data, queue->va_header, rd_idx_ptr, wr_ptr, &tx_wm_ptr); + + /* if skipping update wr_index, then use hfi_header->tx_wm instead */ + if (queue->skip_wr_idx) + *wr_ptr = tx_wm_ptr; + + return 0; +} + +int hw_fence_update_queue_helper(struct hw_fence_driver_data *drv_data, u32 client_id, + struct msm_hw_fence_queue *queue, u16 type, u64 ctxt_id, u64 seqno, u64 hash, u64 flags, + u64 client_data, u32 error, int queue_type) +{ + u32 read_idx; + u32 write_idx; + u32 to_write_idx; + u32 q_size_u32; + u32 q_free_u32; + u32 *q_payload_write_ptr; + u32 payload_size, payload_size_u32; + struct msm_hw_fence_queue_payload *write_ptr_payload; + bool lock_client = false; + u32 lock_idx; + u64 timestamp; + u32 *rd_idx_ptr, *wr_ptr; + int ret = 0; + + if (_get_update_queue_params(drv_data, queue, &q_size_u32, &payload_size, + &payload_size_u32, &rd_idx_ptr, &wr_ptr)) { + HWFNC_ERR("Invalid client:%d q_type:%d queue\n", client_id, queue_type); + return -EINVAL; + } + + /* + * We need to lock the client if there is an Rx Queue update, since that + * is the only time when HW Fence driver can have a race condition updating + * the Rx Queue, which also could be getting updated by the Fence CTL + */ + lock_client = _lock_client_queue(queue_type); + if (lock_client) { + lock_idx = (client_id - 1) * HW_FENCE_LOCK_IDX_OFFSET; + + if (lock_idx >= drv_data->client_lock_tbl_cnt) { + HWFNC_ERR("can't reset rxq, lock for client:%d lock_idx:%d exceed max:%d\n", + client_id, lock_idx, drv_data->client_lock_tbl_cnt); + return -EINVAL; + } + HWFNC_DBG_Q("Locking client id:%d: idx:%d\n", client_id, lock_idx); + + /* lock the client rx queue to update */ + GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 1); /* lock */ + } + + /* Make sure data is ready before read */ + mb(); + + /* Get read and write index */ + read_idx = readl_relaxed(rd_idx_ptr); + write_idx = 
readl_relaxed(wr_ptr); + + HWFNC_DBG_Q("wr client:%d r_ptr:0x%pK w_ptr:0x%pK r_idx:%d w_idx:%d q:0x%pK type:%d s:%s\n", + client_id, rd_idx_ptr, wr_ptr, read_idx, write_idx, queue, queue_type, + queue->skip_wr_idx ? "true" : "false"); + + /* translate read and write indexes from custom indexing to dwords with no offset */ + _translate_queue_indexes_custom_to_default(queue, &read_idx, &write_idx); + + /* Check queue to make sure message will fit */ + q_free_u32 = read_idx <= write_idx ? (q_size_u32 - (write_idx - read_idx)) : + (read_idx - write_idx); + if (q_free_u32 <= payload_size_u32) { + HWFNC_ERR("cannot fit the message size:%d\n", payload_size_u32); + ret = -EINVAL; + goto exit; + } + HWFNC_DBG_Q("q_free_u32:%d payload_size_u32:%d\n", q_free_u32, payload_size_u32); + + /* Move the pointer where we need to write and cast it */ + q_payload_write_ptr = ((u32 *)queue->va_queue + write_idx); + write_ptr_payload = (struct msm_hw_fence_queue_payload *)q_payload_write_ptr; + HWFNC_DBG_Q("q_payload_write_ptr:0x%pK queue: va=0x%pK pa=0x%llx write_ptr_payload:0x%pK\n", + q_payload_write_ptr, queue->va_queue, queue->pa_queue, write_ptr_payload); + + /* calculate the index after the write */ + to_write_idx = write_idx + payload_size_u32; + + HWFNC_DBG_Q("to_write_idx:%u write_idx:%u payload_size:%u\n", to_write_idx, write_idx, + payload_size_u32); + HWFNC_DBG_L("client_id:%d update %s type:%u hash:%llu ctx:%llu seq:%llu flags:%llu e:%u\n", + client_id, _get_queue_type(queue_type), type, hash, ctxt_id, seqno, flags, error); + + /* + * wrap-around case, here we are writing to the last element of the queue, therefore + * set to_write_idx, which is the index after the write, to the beginning of the + * queue + */ + if (to_write_idx >= q_size_u32) + to_write_idx = 0; + + /* translate to_write_idx to custom indexing with offset */ + if (REQUIRES_IDX_TRANSLATION(queue)) { + to_write_idx = IDX_TRANSLATE_DEFAULT_TO_CUSTOM(queue, to_write_idx); + HWFNC_DBG_Q("translated to_write_idx:%d rd_wr_idx start:%d factor:%d\n", + to_write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor); + } + + /* Update Client Queue */ + writeq_relaxed(payload_size, &write_ptr_payload->size); + writew_relaxed(type, &write_ptr_payload->type); + writew_relaxed(HW_FENCE_PAYLOAD_REV(1, 0), &write_ptr_payload->version); + writeq_relaxed(ctxt_id, &write_ptr_payload->ctxt_id); + writeq_relaxed(seqno, &write_ptr_payload->seqno); + writeq_relaxed(hash | SYNX_HW_FENCE_HANDLE_FLAG, &write_ptr_payload->hash); + writeq_relaxed(flags, &write_ptr_payload->flags); + writeq_relaxed(client_data, &write_ptr_payload->client_data); + writel_relaxed(error, &write_ptr_payload->error); + timestamp = hw_fence_get_qtime(drv_data); + writel_relaxed(timestamp, &write_ptr_payload->timestamp_lo); + writel_relaxed(timestamp >> 32, &write_ptr_payload->timestamp_hi); + + /* update memory for the message */ + wmb(); + + /* update the write index */ + writel_relaxed(to_write_idx, wr_ptr); + + /* update memory for the index */ + wmb(); + +exit: + if (lock_client) + GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 0); /* unlock */ + + return ret; +} + +/* + * This function writes to the queue of the client. 
The 'queue_type' determines + * if this function is writing to the rx or tx queue + */ +int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, u64 seqno, u64 hash, + u64 flags, u64 client_data, u32 error, int queue_type) +{ + struct msm_hw_fence_queue *queue; + + if (queue_type >= hw_fence_client->queues_num) { + HWFNC_ERR("Invalid queue type:%d client_id:%d q_num:%d\n", queue_type, + hw_fence_client->client_id, hw_fence_client->queues_num); + return -EINVAL; + } + queue = &hw_fence_client->queues[queue_type]; + + return hw_fence_update_queue_helper(drv_data, hw_fence_client->client_id, queue, + HW_FENCE_PAYLOAD_TYPE_1, ctxt_id, seqno, hash, flags, client_data, error, + queue_type); +} + +int hw_fence_update_existing_txq_payload(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 hash, u32 error) +{ + u32 q_size_u32, payload_size, payload_size_u32, read_idx, write_idx, second_idx; + struct msm_hw_fence_queue_payload tmp, *first_payload, *second_payload; + struct msm_hw_fence_queue *queue; + u32 *rd_idx_ptr, *wr_ptr; + int ret = 0; + + queue = &hw_fence_client->queues[HW_FENCE_TX_QUEUE - 1]; + if (_get_update_queue_params(drv_data, queue, &q_size_u32, &payload_size, + &payload_size_u32, &rd_idx_ptr, &wr_ptr)) { + HWFNC_ERR("Invalid client:%d tx queue\n", hw_fence_client->client_id); + return -EINVAL; + } + + /* Make sure data is ready before read */ + mb(); + + /* Get read and write index */ + read_idx = *rd_idx_ptr; + write_idx = *wr_ptr; + + /* translate read and write indexes from custom indexing to dwords with no offset */ + _translate_queue_indexes_custom_to_default(queue, &read_idx, &write_idx); + + if (read_idx == write_idx) { + HWFNC_DBG_Q("Empty queue, no entry matches with hash:%llu\n", hash); + return -EINVAL; + } + + first_payload = (struct msm_hw_fence_queue_payload *)((u32 *)queue->va_queue + read_idx); + HWFNC_DBG_Q("client:%d txq: va=0x%pK pa=0x%llx idx:%d ptr_payload:0x%pK\n", + hw_fence_client->client_id, queue->va_queue, queue->pa_queue, read_idx, + first_payload); + + if (first_payload->hash == hash) { + /* Swap not needed, update first payload in client queue with fence error */ + first_payload->error = error; + } else { + /* Check whether second entry matches hash */ + second_idx = read_idx + payload_size_u32; + + /* wrap-around case */ + if (second_idx >= q_size_u32) + second_idx = 0; + + if (second_idx == write_idx) { + HWFNC_ERR("Failed to find matching entry with hash:%llu\n", hash); + return -EINVAL; + } + + second_payload = (struct msm_hw_fence_queue_payload *) + ((u32 *)queue->va_queue + second_idx); + HWFNC_DBG_Q("client:%d txq: va=0x%pK pa=0x%llx idx:%d ptr_payload:0x%pK\n", + hw_fence_client->client_id, queue->va_queue, queue->pa_queue, second_idx, + second_payload); + + if (second_payload->hash != hash) { + HWFNC_ERR("hash:%llu not found in first two queue payloads:%u, %u\n", hash, + read_idx, second_idx); + return -EINVAL; + } + + /* swap first and second payload, updating error field in new first payload */ + tmp = *first_payload; + *first_payload = *second_payload; + first_payload->error = error; + *second_payload = tmp; + + HWFNC_DBG_L("client_id:%d txq move from idx:%u to idx:%u hash:%llu c:%llu s:%llu\n", + hw_fence_client->client_id, read_idx, second_idx, hash, tmp.ctxt_id, + tmp.seqno); + } + + /* update memory for the messages */ + wmb(); + + HWFNC_DBG_L("client_id:%d update tx queue index:%u hash:%llu error:%u\n", + 
hw_fence_client->client_id, read_idx, hash, error); + + return ret; +} + +static int init_global_locks(struct hw_fence_driver_data *drv_data) +{ + struct msm_hw_fence_mem_addr *mem_descriptor; + phys_addr_t phys; + void *ptr; + u32 size; + int ret; + + ret = hw_fence_utils_reserve_mem(drv_data, HW_FENCE_MEM_RESERVE_LOCKS_REGION, &phys, &ptr, + &size, 0); + if (ret) { + HWFNC_ERR("Failed to reserve clients locks mem %d\n", ret); + return -ENOMEM; + } + HWFNC_DBG_INIT("phys:0x%llx ptr:0x%pK size:%d\n", phys, ptr, size); + + /* Populate Memory descriptor with address */ + mem_descriptor = &drv_data->clients_locks_mem_desc; + mem_descriptor->virtual_addr = ptr; + mem_descriptor->device_addr = phys; + mem_descriptor->size = size; + mem_descriptor->mem_data = NULL; /* not storing special info for now */ + + /* Initialize internal pointers for managing the tables */ + drv_data->client_lock_tbl = (u64 *)drv_data->clients_locks_mem_desc.virtual_addr; + drv_data->client_lock_tbl_cnt = drv_data->clients_locks_mem_desc.size / sizeof(u64); + + return 0; +} + +static int init_hw_fences_table(struct hw_fence_driver_data *drv_data) +{ + struct msm_hw_fence_mem_addr *mem_descriptor; + phys_addr_t phys; + void *ptr; + u32 size; + int ret; + + ret = hw_fence_utils_reserve_mem(drv_data, HW_FENCE_MEM_RESERVE_TABLE, &phys, &ptr, + &size, 0); + if (ret) { + HWFNC_ERR("Failed to reserve table mem %d\n", ret); + return -ENOMEM; + } + HWFNC_DBG_INIT("phys:0x%llx ptr:0x%pK size:%d\n", phys, ptr, size); + + /* Populate Memory descriptor with address */ + mem_descriptor = &drv_data->hw_fences_mem_desc; + mem_descriptor->virtual_addr = ptr; + mem_descriptor->device_addr = phys; + mem_descriptor->size = size; + mem_descriptor->mem_data = NULL; /* not storing special info for now */ + + /* Initialize internal pointers for managing the tables */ + drv_data->hw_fences_tbl = (struct msm_hw_fence *)drv_data->hw_fences_mem_desc.virtual_addr; + drv_data->hw_fences_tbl_cnt = drv_data->hw_fences_mem_desc.size / + sizeof(struct msm_hw_fence); + + drv_data->hlos_key_tbl = kcalloc(drv_data->hw_fences_tbl_cnt, sizeof(u64), GFP_KERNEL); + if (!drv_data->hlos_key_tbl) + return -ENOMEM; + + HWFNC_DBG_INIT("hw_fences_table:0x%pK cnt:%u\n", drv_data->hw_fences_tbl, + drv_data->hw_fences_tbl_cnt); + + return 0; +} + +static int init_hw_fences_events(struct hw_fence_driver_data *drv_data) +{ + phys_addr_t phys; + void *ptr; + u32 size; + int ret; + + ret = hw_fence_utils_reserve_mem(drv_data, HW_FENCE_MEM_RESERVE_EVENTS_BUFF, &phys, &ptr, + &size, 0); + if (ret) { + HWFNC_DBG_INFO("Failed to reserve events buffer %d\n", ret); + return -ENOMEM; + } + drv_data->events = (struct msm_hw_fence_event *)ptr; + drv_data->total_events = size / sizeof(struct msm_hw_fence_event); + HWFNC_DBG_INIT("events:0x%pK total_events:%u event_sz:%lu total_size:%u\n", + drv_data->events, drv_data->total_events, sizeof(struct msm_hw_fence_event), size); + + return 0; +} + +static int init_ctrl_queue(struct hw_fence_driver_data *drv_data) +{ + struct msm_hw_fence_mem_addr *mem_descriptor; + int ret; + + mem_descriptor = &drv_data->ctrl_queue_mem_desc; + + /* Init ctrl queue */ + ret = init_hw_fences_queues(drv_data, HW_FENCE_MEM_RESERVE_CTRL_QUEUE, + mem_descriptor, drv_data->ctrl_queues, + HW_FENCE_CTRL_QUEUES, 0); + if (ret) + HWFNC_ERR("Failure to init ctrl queue\n"); + + return ret; +} + +static void hw_fence_dma_fence_init_hash_table(struct hw_fence_driver_data *drv_data) +{ + hash_init(drv_data->dma_fence_table); + 
spin_lock_init(&drv_data->dma_fence_table_lock); +} + +int hw_fence_init(struct hw_fence_driver_data *drv_data) +{ + int ret; + __le32 *mem; + + ret = hw_fence_utils_parse_dt_props(drv_data); + if (ret) { + HWFNC_ERR("failed to set dt properties\n"); + goto exit; + } + + /* Allocate hw fence driver mem pool and share it with HYP */ + ret = hw_fence_utils_alloc_mem(drv_data); + if (ret) { + HWFNC_ERR("failed to alloc base memory\n"); + goto exit; + } + + /* Initialize ctrl queue */ + ret = init_ctrl_queue(drv_data); + if (ret) + goto exit; + + ret = init_global_locks(drv_data); + if (ret) + goto exit; + HWFNC_DBG_INIT("Locks allocated at 0x%pK total locks:%d\n", drv_data->client_lock_tbl, + drv_data->client_lock_tbl_cnt); + + /* Initialize hw fences table */ + ret = init_hw_fences_table(drv_data); + if (ret) + goto exit; + + /* Initialize event log */ + ret = init_hw_fences_events(drv_data); + if (ret) + HWFNC_DBG_INFO("Unable to init events\n"); + + /* Map ipcc registers */ + ret = hw_fence_utils_map_ipcc(drv_data); + if (ret) { + HWFNC_ERR("ipcc regs mapping failed\n"); + goto exit; + } + + /* Map time register */ + ret = hw_fence_utils_map_qtime(drv_data); + if (ret) { + HWFNC_ERR("qtime reg mapping failed\n"); + goto exit; + } + + /* Init debugfs */ + ret = hw_fence_debug_debugfs_register(drv_data); + if (ret) { + HWFNC_ERR("debugfs init failed\n"); + goto exit; + } + + /* Init irq from fctl */ + if (drv_data->has_soccp) + ret = hw_fence_utils_init_soccp_irq(drv_data); + else + ret = hw_fence_utils_init_virq(drv_data); + if (ret) { + HWFNC_ERR("failed to init irq has_soccp:%s\n", drv_data->has_soccp ? "true" : + "false"); + goto exit; + } + + if (drv_data->has_soccp) { + ret = hw_fence_utils_register_soccp_ssr_notifier(drv_data); + if (ret) { + HWFNC_ERR("failed to register for soccp ssr notification\n"); + goto exit; + } + } + + hw_fence_dma_fence_init_hash_table(drv_data); + + mem = drv_data->io_mem_base; + HWFNC_DBG_H("memory ptr:0x%pK val:0x%x\n", mem, *mem); + + HWFNC_DBG_INIT("HW Fences Table Initialized: 0x%pK cnt:%d\n", + drv_data->hw_fences_tbl, drv_data->hw_fences_tbl_cnt); + +exit: + return ret; +} + +int hw_fence_alloc_client_resources(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, + struct msm_hw_fence_mem_addr *mem_descriptor) +{ + int ret; + + if (!drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].type) { + HWFNC_ERR("invalid client_id:%d not reserved client queue; check dt props\n", + hw_fence_client->client_id); + return -EINVAL; + } + + /* Init client queues */ + ret = init_hw_fences_queues(drv_data, HW_FENCE_MEM_RESERVE_CLIENT_QUEUE, + &hw_fence_client->mem_descriptor, hw_fence_client->queues, + drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].type->queues_num, + hw_fence_client->client_id); + if (ret) { + HWFNC_ERR("Failure to init the queue for client:%d\n", + hw_fence_client->client_id); + goto exit; + } + + /* Init client memory descriptor */ + if (!IS_ERR_OR_NULL(mem_descriptor)) + memcpy(mem_descriptor, &hw_fence_client->mem_descriptor, + sizeof(struct msm_hw_fence_mem_addr)); + else + HWFNC_DBG_L("null mem descriptor, skipping copy\n"); + +exit: + return ret; +} + +int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client) +{ + int client_id, ret = 0; + + /* + * Initialize IPCC Signals for this client + * + * NOTE: For each Client HW-Core, the client drivers might be the ones making + * it's own initialization (in case that any 
hw-sequence must be enforced), + * however, if that is not the case, any per-client ipcc init to enable the + * signaling, can go here. + */ + switch ((int)hw_fence_client->client_id_ext) { + case HW_FENCE_CLIENT_ID_CTX0: + /* nothing to initialize for gpu client */ + break; +#if IS_ENABLED(CONFIG_DEBUG_FS) + case HW_FENCE_CLIENT_ID_VAL0: + case HW_FENCE_CLIENT_ID_VAL1: + case HW_FENCE_CLIENT_ID_VAL2: + case HW_FENCE_CLIENT_ID_VAL3: + case HW_FENCE_CLIENT_ID_VAL4: + case HW_FENCE_CLIENT_ID_VAL5: + case HW_FENCE_CLIENT_ID_VAL6: + /* initialize ipcc signals for val clients */ + HWFNC_DBG_H("init_controller_signal: val client_id_ext:%d init:%d\n", + hw_fence_client->client_id_ext, drv_data->ipcc_val_initialized); + + if (!drv_data->ipcc_val_initialized) { + drv_data->ipcc_val_initialized = true; + client_id = hw_fence_utils_get_client_id_priv(drv_data, + HW_FENCE_CLIENT_ID_VAL0); + + if (drv_data->has_soccp) { + /* init input-soccp signals for val clients */ + hw_fence_ipcc_enable_client_signal_pairs(drv_data, client_id); + } + } + break; +#endif /* CONFIG_DEBUG_FS */ + case HW_FENCE_CLIENT_ID_CTL0: + case HW_FENCE_CLIENT_ID_CTL1: + case HW_FENCE_CLIENT_ID_CTL2: + case HW_FENCE_CLIENT_ID_CTL3: + case HW_FENCE_CLIENT_ID_CTL4: + case HW_FENCE_CLIENT_ID_CTL5: + /* initialize ipcc signals for dpu clients */ + HWFNC_DBG_H("init_controller_signal: DPU client_id_ext:%d initialized:%d\n", + hw_fence_client->client_id_ext, drv_data->ipcc_dpu_initialized); + if (!drv_data->ipcc_dpu_initialized) { + drv_data->ipcc_dpu_initialized = true; + client_id = hw_fence_utils_get_client_id_priv(drv_data, + HW_FENCE_CLIENT_ID_CTL0); + + /* Init dpu client ipcc signal */ + hw_fence_ipcc_enable_protocol(drv_data, client_id); + hw_fence_ipcc_enable_client_signal_pairs(drv_data, client_id); + } + break; + case HW_FENCE_CLIENT_ID_IPE ... HW_FENCE_CLIENT_ID_IPE + + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - 1: + /* nothing to initialize for IPE client */ + break; + case HW_FENCE_CLIENT_ID_VPU ... HW_FENCE_CLIENT_ID_VPU + + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - 1: + /* nothing to initialize for VPU client */ + break; + case HW_FENCE_CLIENT_ID_IPA ... HW_FENCE_CLIENT_ID_IPA + + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - 1: + /* nothing to initialize for IPA clients */ + break; + case HW_FENCE_CLIENT_ID_IFE0 ... HW_FENCE_CLIENT_ID_IFE11 + + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - 1: + /* nothing to initialize for IFE clients */ + break; + default: + HWFNC_ERR("Unexpected client_id_ext:%d\n", hw_fence_client->client_id_ext); + ret = -EINVAL; + break; + } + + return ret; +} + +int hw_fence_init_controller_resources(struct msm_hw_fence_client *hw_fence_client) +{ + + /* + * Initialize Fence Controller resources for this Client, + * here we need to use the CTRL queue to communicate to the Fence + * Controller the shared memory for the Rx/Tx queue for this client + * as well as any information that Fence Controller might need to + * know for this client. + * + * NOTE: For now, we are doing a static allocation of the + * client's queues, so currently we don't need any notification + * to the Fence CTL here through the CTRL queue. + * Later-on we might need it, once the PVM to SVM (and vice versa) + * communication for initialization is supported. + */ + + return 0; +} + +void hw_fence_cleanup_client(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client) +{ + /* + * Deallocate any resource allocated for this client. 
+ * If fence controller was notified about existence of this client, + * we will need to notify fence controller that this client is gone + * + * NOTE: Since currently we are doing a 'fixed' memory for the clients queues, + * we don't need any notification to the Fence Controller, yet.. + * however, if the memory allocation is removed from 'fixed' to a dynamic + * allocation, then we will need to notify FenceCTL about the client that is + * going-away here. + */ + mutex_lock(&drv_data->clients_register_lock); + drv_data->clients[hw_fence_client->client_id] = NULL; + mutex_unlock(&drv_data->clients_register_lock); + + /* Deallocate client's object */ + HWFNC_DBG_LUT("freeing client_id:%d\n", hw_fence_client->client_id); + kfree(hw_fence_client); +} + +static inline int _calculate_hash(u64 context, u64 seqno, u64 m_size) +{ + u64 a_multiplier = HW_FENCE_HASH_A_MULT; + u64 c_multiplier = HW_FENCE_HASH_C_MULT; + u64 b_multiplier = context + (context - 1); /* odd multiplier */ + + /* + * if m, is power of 2, we can optimize with right shift, + * for now we don't do it, to avoid assuming a power of two + */ + return (a_multiplier * seqno * b_multiplier + (c_multiplier * context)) % m_size; +} + +static inline struct msm_hw_fence *_get_hw_fence(u32 table_total_entries, + struct msm_hw_fence *hw_fences_tbl, + u64 hash) +{ + if (hash >= table_total_entries) { + HWFNC_ERR("hash:%llu out of max range:%u\n", + hash, table_total_entries); + return NULL; + } + + return &hw_fences_tbl[hash]; +} + +static int _hw_fence_lookup_next(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence **hw_fence, u64 *hash, u32 init_step, u32 incr, u32 m_size) +{ + *hash = (*hash + incr) % m_size; + *hw_fence = _get_hw_fence(m_size, drv_data->hw_fences_tbl, *hash); + if (!*hw_fence) { + HWFNC_ERR("failed to get hw-fence hash:%llu\n", *hash); + return m_size; + } + GLOBAL_ATOMIC_STORE(drv_data, &(*hw_fence)->lock, 1); + + return init_step + incr; +} + +/* returns initial step value and initializes hash and hw_fence */ +static int _hw_fence_iterator_init(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence **hw_fence, u64 *hash, u64 context, u64 seqno, u32 start_step, + u32 end_step) +{ + u32 m_size; + + if (!drv_data || !hw_fence || !hash || start_step >= end_step || + end_step > drv_data->hw_fences_tbl_cnt) { + HWFNC_ERR("invalid drv_data:0x%pK hwf:0x%pK h:0x%pK start:%u end:%u tbl_size:%u\n", + drv_data, hw_fence, hash, start_step, end_step, + drv_data ? drv_data->hw_fences_tbl_cnt : -1); + return end_step; + } + + m_size = drv_data->hw_fences_tbl_cnt; + *hash = _calculate_hash(context, seqno, m_size); + HWFNC_DBG_LUT("ctx:%llu seq:%llu tbl_size:%u start_step:%u initial_hash:%llu\n", context, + seqno, m_size, start_step, *hash); + + return _hw_fence_lookup_next(drv_data, hw_fence, hash, 0, start_step, m_size); +} + +/* returns new step value and populates hash and hw_fence */ +static int _hw_fence_iterator_next(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence **hw_fence, u64 *hash, u32 curr_step, u32 end_step, bool found) +{ + u32 m_size = drv_data->hw_fences_tbl_cnt; + + /* unlock previous entry */ + GLOBAL_ATOMIC_STORE(drv_data, &(*hw_fence)->lock, 0); + if ((curr_step + 1) >= end_step || found) { + HWFNC_DBG_LUT("found:%s step:%d max:%d h:%llu v:%u ctx:%llu seq:%llu flg:0x%llx\n", + found ? "true" : "false", curr_step, end_step, *hash, (*hw_fence)->valid, + (*hw_fence)->ctx_id, (*hw_fence)->seq_id, (*hw_fence)->flags); + return found ? 
curr_step : curr_step + 1; + } + + HWFNC_DBG_LUT("cmp failed resolving collision step:%u max:%u hash:%llu\n", curr_step + 1, + end_step, *hash); + + return _hw_fence_lookup_next(drv_data, hw_fence, hash, curr_step, 1, m_size); +} + +static bool _hw_fence_match(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hw_fence, + u64 hash, u64 context, u64 seqno, u64 hlos_key) +{ + return (hw_fence->ctx_id == context) && (hw_fence->seq_id == seqno) + && (drv_data->hlos_key_tbl[hash] == hlos_key); +} + +/* clears everything but the 'valid' field */ +static void _cleanup_hw_fence(struct msm_hw_fence *hw_fence) +{ + int i; + + hw_fence->error = 0; + wmb(); /* update memory to avoid mem-abort */ + hw_fence->ctx_id = 0; + hw_fence->seq_id = 0; + hw_fence->wait_client_mask = 0; + hw_fence->fence_allocator = 0; + hw_fence->fence_signal_client = 0; + + hw_fence->flags = 0; + + hw_fence->fence_create_time = 0; + hw_fence->fence_trigger_time = 0; + hw_fence->fence_wait_time = 0; + hw_fence->refcount = 0; + hw_fence->parents_cnt = 0; + hw_fence->pending_child_cnt = 0; + hw_fence->h_synx = 0; + + for (i = 0; i < MSM_HW_FENCE_MAX_JOIN_PARENTS; i++) + hw_fence->parent_list[i] = HW_FENCE_INVALID_PARENT_FENCE; + + memset(hw_fence->client_data, 0, sizeof(hw_fence->client_data)); +} + +/* This function must be called with the hw fence lock */ +static int _unreserve_hw_fence(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence *hw_fence, u32 hash) +{ + if (hw_fence->refcount & HW_FENCE_HLOS_REFCOUNT_MASK) + hw_fence->refcount--; + else + return -EINVAL; /* keep hw-fence in table for debugging purposes */ + + /* if both hlos and fctl refcounts are cleared, then delete the fence */ + if (!hw_fence->refcount) { + _cleanup_hw_fence(hw_fence); + + /* unreserve this HW fence */ + hw_fence->valid = 0; + + /** + * Note: If last hwfence refcount is removed from fctl then this entry will not be + * cleared. This is okay because the entry will be set to a new value at the time + * of next fence creation. 
+ */ + drv_data->hlos_key_tbl[hash] = 0; + } + + HWFNC_DBG_LUT("Removed ref on fence alloc:%d ctx:%llu seq:%llu refcount:0x%x hash:%u\n", + hw_fence->fence_allocator, hw_fence->ctx_id, hw_fence->seq_id, hw_fence->refcount, + hash); + + return 0; +} + +int hw_fence_destroy_refcount(struct hw_fence_driver_data *drv_data, u64 hash, u32 ref) +{ + struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl; + struct msm_hw_fence *hw_fence = NULL; + int ret = 0; + + hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, hw_fences_tbl, hash); + if (!hw_fence) { + HWFNC_ERR("bad hw fence hash:%llu\n", hash); + return -EINVAL; + } + + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */ + if (hw_fence->refcount & ref) { + hw_fence->refcount &= ~ref; + } else { + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */ + HWFNC_ERR("fence ctx:%llu seq:%llu hash:%llu ref:0x%x before destroy ref:0x%x\n", + hw_fence->ctx_id, hw_fence->seq_id, hash, hw_fence->refcount, ref); + /* keep hw-fence in table for debugging purposes */ + return -EINVAL; + } + if (!hw_fence->refcount) { + _cleanup_hw_fence(hw_fence); + + /* unreserve this HW fence */ + hw_fence->valid = 0; + } + + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */ + HWFNC_DBG_H("Removed 0x%x refcount on fence hash:%llu ref:0x%x\n", ref, hash, + hw_fence->refcount); + + return ret; +} + +/* This function must be called with the hw fence lock */ +static int _reserve_hw_fence(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence *hw_fence, u32 client_id, u64 context, + u64 seqno, u32 hash, u32 pending_child_cnt, u64 hlos_key) +{ + _cleanup_hw_fence(hw_fence); + + /* reserve this HW fence */ + hw_fence->valid = 1; + + hw_fence->ctx_id = context; + hw_fence->seq_id = seqno; + hw_fence->fence_allocator = client_id; + hw_fence->fence_create_time = hw_fence_get_qtime(drv_data); + /* one released by importing client; one released by FCTL */ + hw_fence->refcount = HW_FENCE_FCTL_REFCOUNT + 1; + + hw_fence->pending_child_cnt = pending_child_cnt; + + drv_data->hlos_key_tbl[hash] = hlos_key; + + HWFNC_DBG_LUT("Reserved fence client:%d ctx:%llu seq:%llu pending_child:%u hash:%u\n", + client_id, context, seqno, pending_child_cnt, hash); + + return 0; +} + +/* This function must be called with the hw fence lock */ +static int _fence_found(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hw_fence, + u32 hash) +{ + if ((hw_fence->refcount & HW_FENCE_HLOS_REFCOUNT_MASK) == HW_FENCE_HLOS_REFCOUNT_MASK) + return -EINVAL; + + /* + * Increment the hw-fence refcount. All other processing is done outside. After processing + * is done, the refcount needs to be decremented either explicitly by the client or as part + * of processing in HW Fence Driver. 
+ */ + hw_fence->refcount++; + HWFNC_DBG_LUT("Found fence alloc:%d ctx:%llu seq:%llu refcount:0x%x hash:%u\n", + hw_fence->fence_allocator, hw_fence->ctx_id, hw_fence->seq_id, hw_fence->refcount, + hash); + + return 0; +} + +struct msm_hw_fence *_hw_fence_lookup_and_create_range(struct hw_fence_driver_data *drv_data, + u32 client_id, u64 hlos_key, u64 context, u64 seqno, u32 pending_child_cnt, u64 *hash, + u32 start_step, u32 end_step, u64 flags) +{ + struct msm_hw_fence *hw_fence; + bool hw_fence_found; + int ret = 0; + u32 step; + + if (!drv_data || !hash) { + HWFNC_ERR("Invalid input for hw_fence_lookup drv_data:0x%pK hash:0x%pK\n", + drv_data, hash); + return NULL; + } + + for_each_hw_fence(drv_data, &hw_fence, hash, context, seqno, start_step, end_step, + step, hw_fence_found) { + if (!hw_fence->valid) { + /* Process the hw fence found by the algorithm */ + ret = _reserve_hw_fence(drv_data, hw_fence, client_id, context, seqno, + *hash, pending_child_cnt, hlos_key); + + /* update memory table with processing */ + wmb(); + + HWFNC_DBG_L("client_id:%u ctx:%llu seqno:%llu hash:%llu step:%u\n", + client_id, context, seqno, *hash, step); + + hw_fence_found = true; + } else if (_hw_fence_match(drv_data, hw_fence, *hash, context, seqno, hlos_key)) { + hw_fence_found = true; + if (flags & MSM_HW_FENCE_FLAG_CREATE_SIGNALED) + ret = _fence_found(drv_data, hw_fence, *hash); + else + ret = -EALREADY; + + HWFNC_DBG_L("client_id:%u ctx:%llu seqno:%llu hash:%llu step:%u\n", + client_id, context, seqno, *hash, step); + } + } + + if (ret == -EALREADY) { + HWFNC_ERR("can't create hfence w/ same ctx:%llu seq:%llu hlos_key:0x%pK\n", + context, seqno, (context == hlos_key) ? NULL : (void *)hlos_key); + return NULL; + } + + /* If we iterated through the whole list and didn't find available fences, return null */ + if (!hw_fence_found || ret) { + HWFNC_DBG_LUT("fail to process create hw_fence ctx:%llu seq:%llu start:%u end:%u\n", + context, seqno, start_step, end_step); + return NULL; + } + + return hw_fence; +} + +struct msm_hw_fence *_hw_fence_lookup_and_create(struct hw_fence_driver_data *drv_data, + u32 client_id, u64 hlos_key, u64 context, u64 seqno, u32 pending_child_cnt, u64 *hash) +{ + return _hw_fence_lookup_and_create_range(drv_data, client_id, hlos_key, context, seqno, + pending_child_cnt, hash, 0, drv_data->hw_fences_tbl_cnt, 0); +} + +struct msm_hw_fence *_hw_fence_lookup_and_process_range(struct hw_fence_driver_data *drv_data, + u64 hlos_key, u64 context, u64 seqno, u64 *hash, u32 start_step, u32 end_step, + int (*process_fn)(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hfence, + u32 hash)) +{ + struct msm_hw_fence *hw_fence; + bool hw_fence_found; + int ret = 0; + u32 step; + + if (!drv_data || !hash || !process_fn) { + HWFNC_ERR("Invalid input drv_data:0x%pK hash:0x%pK process_fn:0x%pK\n", + drv_data, hash, process_fn); + return NULL; + } + + for_each_hw_fence(drv_data, &hw_fence, hash, context, seqno, start_step, end_step, step, + hw_fence_found) { + if (_hw_fence_match(drv_data, hw_fence, *hash, context, seqno, hlos_key)) { + /* Process the hw fence found by the algorithm */ + ret = process_fn(drv_data, hw_fence, *hash); + HWFNC_DBG_L("ctx:%llu seqno:%llu hash:%llu step:%u\n", context, seqno, + *hash, step); + hw_fence_found = true; + } + } + + /* If we iterated through the whole list and didn't find available fences, return null */ + if (!hw_fence_found || ret) { + HWFNC_DBG_LUT("fail to process create hw_fence ctx:%llu seq:%llu\n", + context, seqno); + return NULL; + } + + 
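+	/* return the entry at *hash that matched and was handled by process_fn */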
return hw_fence; +} + +struct msm_hw_fence *_hw_fence_lookup_and_process(struct hw_fence_driver_data *drv_data, + u64 hlos_key, u64 context, u64 seqno, u64 *hash, + int (*process_fn)(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hfence, + u32 hash)) +{ + return _hw_fence_lookup_and_process_range(drv_data, hlos_key, context, seqno, hash, 0, + drv_data->hw_fences_tbl_cnt, process_fn); +} + + +struct dma_fence *hw_dma_fence_init(struct msm_hw_fence_client *hw_fence_client, u64 context, + u64 seqno) +{ + struct hw_dma_fence *fence; + spinlock_t *fence_lock; + + /* create dma fence */ + fence_lock = kzalloc(sizeof(*fence_lock), GFP_ATOMIC); + if (!fence_lock) + return ERR_PTR(-ENOMEM); + + fence = kzalloc(sizeof(*fence), GFP_ATOMIC); + if (!fence) { + kfree(fence_lock); + return ERR_PTR(-ENOMEM); + } + + snprintf(fence->name, HW_FENCE_NAME_SIZE, "hwfence:id:%d:ctx=%llu:seqno:%llu", + hw_fence_client->client_id, context, seqno); + spin_lock_init(fence_lock); + + HWFNC_DBG_L("creating dma_fence for client:%d ctx:%llu seqno:%llu\n", + hw_fence_client->client_id, context, seqno); + + dma_fence_init(&fence->base, &hw_fence_dbg_ops, fence_lock, context, seqno); + fence->client_handle = hw_fence_client; + + return (struct dma_fence *)fence; +} + +int hw_fence_dma_fence_table_add(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, u64 hw_fence_hash) +{ + struct hw_dma_fence *hw_dma_fence; + u32 dma_fence_key = hw_fence_hash % DMA_FENCE_HASH_TABLE_SIZE; + unsigned long flags; + + if (!fence || !drv_data || !hw_fence_client) { + HWFNC_ERR("invalid params fence:0x%pK drv_data:0x%pK hw_fence_client:0x%pK\n", + fence, drv_data, hw_fence_client); + return -EINVAL; + } + + hw_dma_fence = to_hw_dma_fence(fence); + HWFNC_DBG_L("add hw_dma_fence:%pK client:%d ctx:%llu seqno:%llu key:%u hash:%llu\n", + hw_dma_fence, hw_fence_client->client_id, fence->context, fence->seqno, + dma_fence_key, hw_fence_hash); + + hw_dma_fence->dma_fence_key = dma_fence_key; + hw_dma_fence->is_internal = true; + hw_dma_fence->signal_cb.hash = hw_fence_hash; + hw_dma_fence->signal_cb.drv_data = drv_data; + + spin_lock_irqsave(&drv_data->dma_fence_table_lock, flags); + hash_add(drv_data->dma_fence_table, &hw_dma_fence->node, dma_fence_key); + spin_unlock_irqrestore(&drv_data->dma_fence_table_lock, flags); + + return 0; +} + +static void msm_hw_fence_internal_signal_callback(struct dma_fence *fence, struct dma_fence_cb *cb) +{ + struct hw_fence_signal_cb *signal_cb; + + if (!fence || !cb) { + HWFNC_ERR("Invalid params fence:0x%pK cb:0x%pK\n", fence, cb); + return; + } + + HWFNC_DBG_IRQ("dma-fence signal callback ctx:%llu seqno:%llu flags:%lx err:%d\n", + fence->context, fence->seqno, fence->flags, fence->error); + + signal_cb = (struct hw_fence_signal_cb *)cb; + + if (hw_fence_signal_fence(signal_cb->drv_data, fence, signal_cb->hash, fence->error, false)) + HWFNC_ERR("failed to signal fence ctx:%llu seq:%llu hash:%llu err:%u\n", + fence->context, fence->seqno, signal_cb->hash, fence->error); +} + + +struct dma_fence *hw_fence_internal_dma_fence_create(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 *hash) +{ + struct hw_dma_fence *hw_dma_fence; + struct msm_hw_fence *hw_fence; + struct dma_fence *fence; + u64 context, seqno; + int ret = 0; + + if (!drv_data || !hw_fence_client || !hash) + return ERR_PTR(-EINVAL); + + context = hw_fence_client->context_id; + seqno = atomic_add_return(1, &hw_fence_client->seqno); + fence = 
hw_dma_fence_init(hw_fence_client, context, seqno); + if (IS_ERR_OR_NULL(fence)) { + HWFNC_ERR("failed to create internal dma-fence client:%d ctx:%llu seq:%llu\n", + hw_fence_client->client_id, context, seqno); + return ERR_PTR(-EINVAL); + } + + ret = hw_fence_create(drv_data, hw_fence_client, (u64)fence, context, seqno, hash); + if (ret) { + HWFNC_ERR("failed to back internal dma-fence client:%d ctx:%llu seq:%llu\n", + hw_fence_client->client_id, context, seqno); + ret = -EINVAL; + goto error; + } + + hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, drv_data->hw_fences_tbl, *hash); + if (!hw_fence) { + HWFNC_ERR("bad hw fence hash:%llu client:%u\n", *hash, hw_fence_client->client_id); + ret = -EINVAL; + goto error; + } + + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */ + hw_fence->flags |= MSM_HW_FENCE_FLAG_INTERNAL_OWNED; + hw_fence->refcount |= HW_FENCE_DMA_FENCE_REFCOUNT; + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */ + + /* If no error, set the HW Fence Flag in the dma-fence */ + set_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags); + + ret = hw_fence_dma_fence_table_add(drv_data, hw_fence_client, fence, *hash); + if (ret) { + HWFNC_ERR("failed to add hw-fence ctx:%llu seq:%llu hash:%llu to dma-fence table\n", + context, seqno, *hash); + ret = -EINVAL; + goto error; + } + + hw_dma_fence = to_hw_dma_fence(fence); + /* internal_signal_callback does not take an additional hw-fence refcount */ + ret = dma_fence_add_callback(fence, &hw_dma_fence->signal_cb.fence_cb, + msm_hw_fence_internal_signal_callback); + if (ret) + HWFNC_ERR("Failed to add signal callback ctx:%llu seq:%llu hash:%llu ret:%d\n", + context, seqno, *hash, ret); + +error: + if (ret) { + dma_fence_put(fence); + return ERR_PTR(ret); + } + + return fence; +} + +struct dma_fence *hw_fence_dma_fence_find(struct hw_fence_driver_data *drv_data, + u64 hw_fence_hash, bool incr_refcount) +{ + u32 dma_fence_key = hw_fence_hash % DMA_FENCE_HASH_TABLE_SIZE; + struct hw_dma_fence *hw_dma_fence = NULL, *curr; + struct dma_fence *fence = NULL; + unsigned long flags; + + spin_lock_irqsave(&drv_data->dma_fence_table_lock, flags); + hash_for_each_possible(drv_data->dma_fence_table, curr, node, dma_fence_key) { + if (hw_fence_hash == curr->signal_cb.hash) { + hw_dma_fence = curr; + fence = &hw_dma_fence->base; + if (incr_refcount) + dma_fence_get(fence); + break; + } + } + spin_unlock_irqrestore(&drv_data->dma_fence_table_lock, flags); + + HWFNC_DBG_L("hw_dma_fence: %s:%pK ctx:%llu seqno:%llu key:%u dma_fence_ref:%u incr:%s\n", + fence ? "found" : "not found", hw_dma_fence, + fence ? fence->context : 0, fence ? fence->seqno : 0, + dma_fence_key, fence ? kref_read(&fence->refcount) : -1, + incr_refcount ? 
"true" : "false"); + + return fence; +} + +static int hw_fence_dma_fence_table_del(struct hw_fence_driver_data *drv_data, u64 hash, + u64 flags, u32 error) +{ + struct hw_dma_fence *hw_dma_fence; + struct dma_fence *fence; + unsigned long lock_flags; + int ret = 0; + + fence = hw_fence_dma_fence_find(drv_data, hash, false); + if (IS_ERR_OR_NULL(fence)) + return PTR_ERR(fence); + + hw_dma_fence = to_hw_dma_fence(fence); + + HWFNC_DBG_L("removing dma_fence ctx:%llu seqno:%llu key:%u dma_fence_ref:%u\n", + fence->context, fence->seqno, hw_dma_fence->dma_fence_key, + kref_read(&fence->refcount)); + + spin_lock_irqsave(&drv_data->dma_fence_table_lock, lock_flags); + /* remove dma-fence from the internal hash table */ + if (hash_hashed(&hw_dma_fence->node)) + hash_del(&hw_dma_fence->node); + else + ret = -EINVAL; + spin_unlock_irqrestore(&drv_data->dma_fence_table_lock, lock_flags); + + if (ret) + HWFNC_ERR("internally owned dma-fence is not in table ctx:%llu seqno:%llu key:%u\n", + fence->context, fence->seqno, hw_dma_fence->dma_fence_key); + + /* avoid signaling hw-fence when releasing hlos ref */ + dma_fence_remove_callback(fence, &hw_dma_fence->signal_cb.fence_cb); + + spin_lock_irqsave(fence->lock, lock_flags); + if (!dma_fence_is_signaled(fence)) { + if (!(flags & MSM_HW_FENCE_FLAG_SIGNAL)) + error = SYNX_STATE_SIGNALED_CANCEL; + if (error) + dma_fence_set_error(fence, -error); + dma_fence_signal_locked(fence); + } + spin_unlock_irqrestore(fence->lock, lock_flags); + dma_fence_put(fence); + + return ret; +} + +int hw_fence_create(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 hlos_key, u64 context, + u64 seqno, u64 *hash) +{ + u32 client_id = hw_fence_client->client_id; + int ret = 0; + + /* allocate hw fence in table */ + if (!_hw_fence_lookup_and_create(drv_data, client_id, hlos_key, context, seqno, 0, hash)) { + HWFNC_ERR("Fail to create fence client:%u ctx:%llu seqno:%llu\n", + client_id, context, seqno); + ret = -EINVAL; + } + + /** + * Note: This addresses any race conditions where clients may have been in progress + * creating hw-fences when soccp crashes + */ + if (!drv_data->fctl_ready) { + HWFNC_ERR("unable to create hw-fence while fctl is not in valid state\n"); + hw_fence_destroy_refcount(drv_data, *hash, HW_FENCE_FCTL_REFCOUNT); + hw_fence_destroy_with_hash(drv_data, hw_fence_client, *hash); + return -EAGAIN; + } + + if (hw_fence_client->skip_fctl_ref) { + ret = hw_fence_destroy_refcount(drv_data, *hash, HW_FENCE_FCTL_REFCOUNT); + if (ret) + HWFNC_ERR("Can't remove fctl ref client:%u ctx:%llu seqno:%llu hash:%llu\n", + client_id, context, seqno, *hash); + } + + return ret; +} + +int hw_fence_destroy(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 hlos_key, u64 context, u64 seqno) +{ + u32 client_id = hw_fence_client->client_id; + int ret = 0; + u64 hash; + + /* decrement refcount on hw-fence */ + if (!_hw_fence_lookup_and_process(drv_data, hlos_key, context, seqno, &hash, + &_unreserve_hw_fence)) { + HWFNC_ERR("Fail removing ref on fence client:%u ctx:%llu seqno:%llu\n", + client_id, context, seqno); + ret = -EINVAL; + } + + return ret; +} + +/* + * This must be called while holding hw-fence lock; this releases hw-fence lock and (if needed) + * associated dma-fence if necessary + */ +static int hw_fence_put_and_unlock(struct hw_fence_driver_data *drv_data, u32 client_id, + struct msm_hw_fence *hw_fence, u64 hash) +{ + bool release_dma = false; + int ret = 0; + u64 flags; + u32 error; + + if 
(hw_fence->refcount & HW_FENCE_HLOS_REFCOUNT_MASK) { + hw_fence->refcount--; + } else { + ret = -EINVAL; + goto end; /* keep hw-fence in table for debugging purposes */ + } + + if ((hw_fence->flags & MSM_HW_FENCE_FLAG_INTERNAL_OWNED) && + !(hw_fence->refcount & HW_FENCE_HLOS_REFCOUNT_MASK)) { + hw_fence->flags &= ~MSM_HW_FENCE_FLAG_INTERNAL_OWNED; + release_dma = true; + flags = hw_fence->flags; + error = hw_fence->error; + } + + if (!hw_fence->refcount) { + _cleanup_hw_fence(hw_fence); + + /* unreserve this HW fence */ + hw_fence->valid = 0; + } + +end: + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */ + + if (ret) { + HWFNC_ERR("fence client:%d ctx:%llu seq:%llu hash:%llu ref:0x%x before decr\n", + client_id, hw_fence->ctx_id, hw_fence->seq_id, hash, hw_fence->refcount); + return ret; + } + + if (release_dma) { + ret = hw_fence_dma_fence_table_del(drv_data, hash, flags, error); + if (ret) + HWFNC_ERR("Failed to delete internal dma-fence for hw-fence hash:%llu\n", + hash); + } + + return ret; +} + +int hw_fence_destroy_with_hash(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 hash) +{ + u32 client_id = hw_fence_client ? hw_fence_client->client_id : ~0; + struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl; + struct msm_hw_fence *hw_fence = NULL; + + hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, hw_fences_tbl, hash); + if (!hw_fence) { + HWFNC_ERR("bad hw fence hash:%llu client:%u\n", hash, client_id); + return -EINVAL; + } + + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */ + return hw_fence_put_and_unlock(drv_data, client_id, hw_fence, hash); +} + +static struct msm_hw_fence *_hw_fence_process_join_fence(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, + struct dma_fence_array *array, u64 *hash, bool create) +{ + struct msm_hw_fence *hw_fences_tbl; + struct msm_hw_fence *join_fence = NULL; + u64 context, seqno; + u32 client_id, pending_child_cnt; + + /* + * NOTE: For now we are allocating the join fences from the same table as all + * the other fences (i.e. drv_data->hw_fences_tbl), functionally this will work, however, + * this might impact the lookup algorithm, since the "join-fences" are created with the + * context and seqno of a fence-array, and those might not be changing by the client, + * so this will linearly increment the look-up and very likely impact the other fences if + * these join-fences start to fill-up a particular region of the fences global table. + * So we might have to allocate a different table altogether for these join fences. 
+ * However, to do this, just alloc another table and change it here: + */ + hw_fences_tbl = drv_data->hw_fences_tbl; + + context = array->base.context; + seqno = array->base.seqno; + pending_child_cnt = array->num_fences; + client_id = HW_FENCE_JOIN_FENCE_CLIENT_ID; + + if (create) { + /* allocate the fence */ + join_fence = _hw_fence_lookup_and_create(drv_data, client_id, (u64)array, context, + seqno, pending_child_cnt, hash); + if (!join_fence) + HWFNC_ERR("Fail to create join fence client:%u ctx:%llu seqno:%llu\n", + client_id, context, seqno); + } else if (hw_fence_destroy_refcount(drv_data, *hash, HW_FENCE_FCTL_REFCOUNT)) { + HWFNC_ERR("Fail destroy join fence client:%u ctx:%llu seq:%llu hash:%llu\n", + client_id, context, seqno, *hash); + } + + return join_fence; +} + +struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 hlos_key, u64 context, + u64 seqno, u64 *hash) +{ + struct msm_hw_fence *hw_fence; + u32 client_id = hw_fence_client ? hw_fence_client->client_id : ~0; + + /* find the hw fence */ + hw_fence = _hw_fence_lookup_and_process(drv_data, hlos_key, context, seqno, hash, + &_fence_found); + if (!hw_fence) + HWFNC_ERR("Fail to find hw fence client:%u ctx:%llu seqno:%llu\n", + client_id, context, seqno); + + return hw_fence; +} + +static int _fence_ctl_signal(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash, + u64 flags, u64 client_data, u32 error, bool signal_from_import) +{ + int ret = 0; + u32 tx_client_id = drv_data->ipcc_client_pid; /* phys id for tx client */ + u32 rx_client_id = hw_fence_client->ipc_client_vid; /* virt id for rx client */ + + HWFNC_DBG_H("We must signal the client now! hfence hash:%llu\n", hash); + + /* Call fence error callback */ + if (error && hw_fence_client->fence_error_cb) { + ret = hw_fence_utils_fence_error_cb(hw_fence_client, hw_fence->ctx_id, + hw_fence->seq_id, hash, flags, error); + } else { + /* Write to Rx queue */ + if (hw_fence_client->signaled_update_rxq || + (hw_fence_client->update_rxq && !signal_from_import)) { + ret = hw_fence_update_queue(drv_data, hw_fence_client, hw_fence->ctx_id, + hw_fence->seq_id, hash, flags, client_data, error, + HW_FENCE_RX_QUEUE - 1); + if (ret) { + HWFNC_ERR("Can't update rxq clt:%d h:%llu ctx:%llu sq:%llu e:%d\n", + hw_fence_client ? 
hw_fence_client->client_id : -1, hash, + hw_fence->ctx_id, hw_fence->seq_id, error); + return ret; + } + } + +#if IS_ENABLED(CONFIG_DEBUG_FS) + /* signal validation clients on targets with vm through custom mechanism */ + if (!drv_data->has_soccp && hw_fence_client->client_id >= HW_FENCE_CLIENT_ID_VAL0 && + hw_fence_client->client_id <= HW_FENCE_CLIENT_ID_VAL6) { + ret = process_validation_client_loopback(drv_data, + hw_fence_client->client_id); + return ret; + } +#endif /* CONFIG_DEBUG_FS */ + + /* Signal the hw fence now */ + if (hw_fence_client->signaled_send_ipc || !signal_from_import) + hw_fence_ipcc_trigger_signal(drv_data, tx_client_id, rx_client_id, + hw_fence_client->ipc_signal_id); + } + + return ret; +} + +static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, int iteration, struct dma_fence_array *array, + struct msm_hw_fence *join_fence, u64 hash_join_fence) +{ + struct dma_fence *child_fence; + struct msm_hw_fence *hw_fence_child; + bool child_is_signaled; + int idx, j; + u64 hash = 0; + + if (!array->fences) + goto destroy_fence; + + /* cleanup the child-fences from the parent join-fence */ + for (idx = iteration; idx >= 0; idx--) { + child_fence = array->fences[idx]; + if (!child_fence) { + HWFNC_ERR("invalid child fence idx:%d\n", idx); + continue; + } + + hw_fence_child = hw_fence_find_with_dma_fence(drv_data, hw_fence_client, + child_fence, &hash, &child_is_signaled, false); + if (child_is_signaled) { + continue; + } else if (!hw_fence_child) { + HWFNC_ERR("Cannot cleanup child fence context:%llu seqno:%llu hash:%llu\n", + child_fence->context, child_fence->seqno, hash); + + /* + * ideally this should not have happened, but if it did, try to keep + * cleaning-up other fences after printing the error + */ + continue; + } + + /* lock the child while we clean it up from the parent join-fence */ + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 1); /* lock */ + for (j = hw_fence_child->parents_cnt; j > 0; j--) { + + if (j > MSM_HW_FENCE_MAX_JOIN_PARENTS) { + HWFNC_ERR("Invalid max parents_cnt:%d, will reset to max:%d\n", + hw_fence_child->parents_cnt, MSM_HW_FENCE_MAX_JOIN_PARENTS); + + j = MSM_HW_FENCE_MAX_JOIN_PARENTS; + } + + if (hw_fence_child->parent_list[j - 1] == hash_join_fence) { + hw_fence_child->parent_list[j - 1] = HW_FENCE_INVALID_PARENT_FENCE; + + if (hw_fence_child->parents_cnt) + hw_fence_child->parents_cnt--; + + /* update memory for the table update */ + wmb(); + } + } + /* decrement refcount acquired by finding fence */ + hw_fence_put_and_unlock(drv_data, hw_fence_client->client_id, hw_fence_child, hash); + } + +destroy_fence: + /* destroy join fence */ + _hw_fence_process_join_fence(drv_data, hw_fence_client, array, &hash_join_fence, + false); +} + +/* update join fence for signaled child_fence and return if the join fence should be signaled */ +bool _update_and_get_join_fence_signal_status(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence *join_fence, u32 child_fence_error) +{ + bool signal_join_fence, error = false; + + /* child fence is already signaled */ + GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 1); /* lock */ + join_fence->error |= child_fence_error; + if (join_fence->pending_child_cnt) + join_fence->pending_child_cnt--; + else + error = true; + signal_join_fence = !join_fence->pending_child_cnt; + GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 0); /* unlock */ + + /* update memory for the table update */ + wmb(); + + if (error) + HWFNC_ERR("join 
fence ctx:%llu seq:%llu pending_child_cnt==0 before decrement\n", + join_fence->ctx_id, join_fence->seq_id); + + return signal_join_fence; +} + +int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, struct dma_fence_array *array, + u64 *hash_join_fence, u64 client_data) +{ + struct msm_hw_fence *join_fence; + struct msm_hw_fence *hw_fence_child; + struct dma_fence *child_fence; + bool child_is_signaled, signal_join_fence = false; + u64 hash; + int i, ret = 0; + enum hw_fence_client_data_id data_id; + + if (client_data) { + data_id = hw_fence_get_client_data_id(hw_fence_client->client_id_ext); + if (data_id >= HW_FENCE_MAX_CLIENTS_WITH_DATA) { + HWFNC_ERR("Populating client_data:%llu with invalid client_id_ext:%d\n", + client_data, hw_fence_client->client_id_ext); + return -EINVAL; + } + } + + /* + * Create join fence from the join-fences table, + * This function initializes: + * join_fence->pending_child_count = array->num_fences + */ + join_fence = _hw_fence_process_join_fence(drv_data, hw_fence_client, array, + hash_join_fence, true); + if (!join_fence) { + HWFNC_ERR("cannot alloc hw fence for join fence array\n"); + return -EINVAL; + } + + /* update this as waiting client of the join-fence */ + GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 1); /* lock */ + join_fence->wait_client_mask |= BIT(hw_fence_client->client_id); + GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 0); /* unlock */ + + /* Iterate through fences of the array */ + for (i = 0; i < array->num_fences; i++) { + child_fence = array->fences[i]; + + if (!child_fence) { + HWFNC_ERR("NULL child fence at index:%d for fence array\n", i); + ret = -EINVAL; + goto error_array; + } + + /* Nested fence-arrays are not supported */ + if (to_dma_fence_array(child_fence)) { + HWFNC_ERR("This is a nested fence, fail!\n"); + ret = -EINVAL; + goto error_array; + } + + /* All elements in the fence-array must be hw-fences */ + if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &child_fence->flags)) { + HWFNC_ERR("DMA Fence in FenceArray is not a HW Fence\n"); + ret = -EINVAL; + goto error_array; + } + + /* Find the HW Fence in the Global Table */ + hw_fence_child = hw_fence_find_with_dma_fence(drv_data, hw_fence_client, + child_fence, &hash, &child_is_signaled, false); + if (child_is_signaled) { + signal_join_fence = _update_and_get_join_fence_signal_status(drv_data, + join_fence, child_fence->error); + continue; + } else if (!hw_fence_child) { + HWFNC_ERR("Cannot find child fence context:%llu seqno:%llu hash:%llu\n", + child_fence->context, child_fence->seqno, hash); + ret = -EINVAL; + goto error_array; + } + + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 1); /* lock */ + if (hw_fence_child->flags & MSM_HW_FENCE_FLAG_SIGNAL) { + + /* child fence is already signaled */ + signal_join_fence = _update_and_get_join_fence_signal_status(drv_data, + join_fence, hw_fence_child->error); + } else { + + /* child fence is not signaled */ + hw_fence_child->parents_cnt++; + + if (hw_fence_child->parents_cnt >= MSM_HW_FENCE_MAX_JOIN_PARENTS + || hw_fence_child->parents_cnt < 1) { + + /* Max number of parents for a fence is exceeded */ + HWFNC_ERR("DMA Fence in FenceArray exceeds parents:%d\n", + hw_fence_child->parents_cnt); + hw_fence_child->parents_cnt--; + + /* decrement refcount acquired by finding fence */ + hw_fence_put_and_unlock(drv_data, hw_fence_client->client_id, + hw_fence_child, hash); + + ret = -EINVAL; + goto error_array; + } + + 
hw_fence_child->parent_list[hw_fence_child->parents_cnt - 1] =
+				*hash_join_fence;
+		}
+		/* decrement refcount acquired by finding fence */
+		hw_fence_put_and_unlock(drv_data, hw_fence_client->client_id, hw_fence_child, hash);
+	}
+
+	if (client_data)
+		join_fence->client_data[data_id] = client_data;
+
+	/* all fences were signaled, signal client now */
+	if (signal_join_fence) {
+
+		/* signal the join hw fence */
+		_fence_ctl_signal(drv_data, hw_fence_client, join_fence, *hash_join_fence, 0,
+			client_data, join_fence->error, true);
+		set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &array->base.flags);
+
+		/*
+		 * job of the join-fence is finished since we already signaled,
+		 * we can delete it now. This can happen when all the fences that
+		 * are part of the join-fence are already signaled.
+		 */
+		_hw_fence_process_join_fence(drv_data, hw_fence_client, array, hash_join_fence,
+			false);
+	} else if (!array->num_fences) {
+		/*
+		 * if we didn't signal the join-fence and the fence-array contains no fences,
+		 * fail here; otherwise the driver would create a join-fence with no children
+		 * (or an otherwise incomplete join-fence) that would never be signaled
+		 */
+		HWFNC_ERR("invalid fence-array ctx:%llu seqno:%llu without fences\n",
+			array->base.context, array->base.seqno);
+		goto error_array;
+	}
+
+	return ret;
+
+error_array:
+	_cleanup_join_and_child_fences(drv_data, hw_fence_client, i, array, join_fence,
+		*hash_join_fence);
+
+	return -EINVAL;
+}
+
+/**
+ * Registers the hw-fence client for wait on a hw-fence and keeps a reference on that hw-fence.
+ * The hw-fence must be explicitly dereferenced following this function, e.g. by the client's
+ * synx_release call.
+ * This function does not register the fence_allocator as a waiting client.
+ *
+ * Note: This is the only place where the hw-fence refcount is retained for the client to release.
+ * In all other places, the HW Fence Driver releases the refcount held for processing.
+ */ +int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, + struct dma_fence *fence, struct msm_hw_fence_client *hw_fence_client, u64 context, + u64 seqno, u64 *hash, u64 client_data) +{ + struct msm_hw_fence *hw_fence; + enum hw_fence_client_data_id data_id; + bool is_signaled = false; + int destroy_ret, ret = 0; + + if (client_data) { + data_id = hw_fence_get_client_data_id(hw_fence_client->client_id_ext); + if (data_id >= HW_FENCE_MAX_CLIENTS_WITH_DATA) { + HWFNC_ERR("Populating client_data:%llu with invalid client_id_ext:%d\n", + client_data, hw_fence_client->client_id); + return -EINVAL; + } + } + + /* refcount from finding fence must be explicitly released outside this function call */ + if (fence) + hw_fence = hw_fence_find_with_dma_fence(drv_data, hw_fence_client, fence, hash, + &is_signaled, true); + else + hw_fence = msm_hw_fence_find(drv_data, hw_fence_client, context, context, seqno, + hash); + if (!hw_fence) { + HWFNC_ERR("Cannot find fence!\n"); + return -EINVAL; + } + + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */ + + /* + * If a creating client calls synx_import, then an additional hlos refcount is taken and a + * refcount is set for processing this fence in FenceCTL + */ + if (hw_fence->fence_allocator == hw_fence_client->client_id) { + hw_fence->refcount |= HW_FENCE_FCTL_REFCOUNT; + } else { + /* register client in the hw fence */ + is_signaled = hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL; + hw_fence->wait_client_mask |= BIT(hw_fence_client->client_id); + hw_fence->fence_wait_time = hw_fence_get_qtime(drv_data); + if (client_data) + hw_fence->client_data[data_id] = client_data; + } + + /* update memory for the table update */ + wmb(); + + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */ + + /* if hw fence already signaled, signal the client */ + if (is_signaled) { + if (fence != NULL) + set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &fence->flags); + ret = _fence_ctl_signal(drv_data, hw_fence_client, hw_fence, *hash, 0, client_data, + hw_fence->error, true); + if (ret) { + HWFNC_ERR("failed to signal client:%d for import signaled fence h:%llu\n", + hw_fence_client ? hw_fence_client->client_id : 0xff, *hash); + destroy_ret = hw_fence_destroy_with_hash(drv_data, hw_fence_client, *hash); + if (destroy_ret) + HWFNC_ERR("failed destroy ref for failed import client:%d h:%llu\n", + hw_fence_client ? 
hw_fence_client->client_id : 0xff, *hash);
+		}
+	}
+
+	return ret;
+}
+
+int hw_fence_process_fence(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client,
+	struct dma_fence *fence, u64 *hash, u64 client_data)
+{
+	int ret = 0;
+
+	if (!drv_data || !hw_fence_client || !fence) {
+		HWFNC_ERR("Invalid Input!\n");
+		return -EINVAL;
+	}
+	/* fence must be a hw-fence */
+	if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) {
+		HWFNC_ERR("DMA Fence is not a HW Fence flags:0x%lx\n", fence->flags);
+		return -EINVAL;
+	}
+
+	ret = hw_fence_register_wait_client(drv_data, fence, hw_fence_client, fence->context,
+		fence->seqno, hash, client_data);
+	if (ret)
+		HWFNC_ERR("Error registering for wait client:%d\n", hw_fence_client->client_id);
+
+	return ret;
+}
+
+static void _signal_all_wait_clients(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence *hw_fence, u64 wait_client_mask, u64 hash, int error)
+{
+	enum hw_fence_client_id wait_client_id;
+	enum hw_fence_client_data_id data_id;
+	struct msm_hw_fence_client *hw_fence_wait_client;
+	u64 client_data = 0;
+
+	/* signal all the clients waiting on this fence, propagating any error */
+	for (wait_client_id = 0; wait_client_id <= drv_data->rxq_clients_num; wait_client_id++) {
+		if (wait_client_mask & BIT(wait_client_id)) {
+			hw_fence_wait_client = drv_data->clients[wait_client_id];
+
+			if (!hw_fence_wait_client)
+				continue;
+
+			data_id = hw_fence_get_client_data_id(hw_fence_wait_client->client_id_ext);
+
+			if (data_id < HW_FENCE_MAX_CLIENTS_WITH_DATA)
+				client_data = hw_fence->client_data[data_id];
+
+			_fence_ctl_signal(drv_data, hw_fence_wait_client, hw_fence,
+				hash, 0, client_data, error, false);
+		}
+	}
+}
+
+/*
+ * This function must be called with a signaled hw-fence; hw_fence->parents_cnt and
+ * hw_fence->parent_list fields are only modified for unsignaled fences
+ */
+static void _signal_parent_fences(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence *hw_fence, u32 parents_cnt, u64 hash, int error)
+{
+	struct msm_hw_fence *join_fence;
+	u64 parent_hash;
+	int i;
+
+	if (parents_cnt > MSM_HW_FENCE_MAX_JOIN_PARENTS) {
+		HWFNC_ERR("hw_fence hash:%llu has invalid parents_cnt:%u max:%u\n", hash,
+			parents_cnt, MSM_HW_FENCE_MAX_JOIN_PARENTS);
+		parents_cnt = MSM_HW_FENCE_MAX_JOIN_PARENTS;
+	}
+
+	for (i = 0; i < parents_cnt; i++) {
+		parent_hash = hw_fence->parent_list[i];
+		join_fence = _get_hw_fence(drv_data->hw_fence_table_entries,
+			drv_data->hw_fences_tbl, parent_hash);
+		if (!join_fence) {
+			HWFNC_ERR("bad parent hash:%llu of child hash:%llu\n", parent_hash, hash);
+			continue;
+		}
+
+		if (_update_and_get_join_fence_signal_status(drv_data, join_fence, error)) {
+			/* no need to lock access to wait client mask for join fences */
+			_signal_all_wait_clients(drv_data, join_fence, join_fence->wait_client_mask,
+				parent_hash, join_fence->error);
+
+			/* decrement refcount for signal on behalf of fence controller */
+			hw_fence_destroy_refcount(drv_data, parent_hash, HW_FENCE_FCTL_REFCOUNT);
+		}
+	}
+}
+
+/*
+ * Check fence signaling status. If unsignaled,
+ * 1. signal waiting clients,
+ * 2. signal parent fences (and waiting clients on parent fences)
+ * 3.
decrement refcount for signal on behalf of fence controller (if release_ref is true) + */ +static void _signal_fence_if_unsignaled(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence *hw_fence, u64 hash, int error, bool release_ref) +{ + u64 wait_client_mask; + u32 parents_cnt; + + /* check flags and error for signaling */ + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */ + if (hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL) { + /* fence is already signaled so do nothing */ + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); + return; + } + hw_fence->flags |= MSM_HW_FENCE_FLAG_SIGNAL; + hw_fence->error = error; + wait_client_mask = hw_fence->wait_client_mask; + parents_cnt = hw_fence->parents_cnt; + hw_fence->parents_cnt = 0; + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */ + + /* fields used by the following are not modified for signaled fences */ + _signal_parent_fences(drv_data, hw_fence, parents_cnt, hash, error); + _signal_all_wait_clients(drv_data, hw_fence, wait_client_mask, hash, error); + + /* remove ref held by fence controller to signal hw-fence */ + if (release_ref) + hw_fence_destroy_refcount(drv_data, hash, HW_FENCE_FCTL_REFCOUNT); +} + +struct msm_hw_fence *_create_signaled_hw_fence(struct hw_fence_driver_data *drv_data, + u32 client_id, struct dma_fence *fence, u64 *hash) +{ + struct msm_hw_fence *hw_fence; + + /* create new hw-fence for signaled dma-fence */ + hw_fence = _hw_fence_lookup_and_create_range(drv_data, client_id, (u64)fence, + fence->context, fence->seqno, 0, hash, 0, drv_data->hw_fences_tbl_cnt, + MSM_HW_FENCE_FLAG_CREATE_SIGNALED); + if (hw_fence) { + _signal_fence_if_unsignaled(drv_data, hw_fence, *hash, fence->error, true); + HWFNC_DBG_H("created hw-fence to back signaled fence client:%u ctx:%llu seq:%llu\n", + client_id, fence->context, fence->seqno); + } else { + HWFNC_ERR("Fail to create signaled hfence client:%u ctx:%llu seq:%llu\n", client_id, + fence->context, fence->seqno); + } + + return hw_fence; +} + +/* finds hw-fence in HW Fence table if present; if not and create==true, create a new hw-fence */ +struct msm_hw_fence *hw_fence_find_with_dma_fence(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, u64 *hash, + bool *is_signaled, bool create) +{ + u32 step, end_step, client_id = hw_fence_client ? hw_fence_client->client_id : 0xff; + struct msm_hw_fence *hw_fence = NULL; + + if (!create && dma_fence_is_signaled(fence)) { + /* signaled dma-fence may have been removed from table */ + *is_signaled = true; + return NULL; + } + + for (step = 0; step < drv_data->hw_fences_tbl_cnt; step += HW_FENCE_FIND_THRESHOLD) { + end_step = (step + HW_FENCE_FIND_THRESHOLD > drv_data->hw_fences_tbl_cnt) ? + drv_data->hw_fences_tbl_cnt : step + HW_FENCE_FIND_THRESHOLD; + hw_fence = _hw_fence_lookup_and_process_range(drv_data, (u64)fence, fence->context, + fence->seqno, hash, step, end_step, _fence_found); + if (hw_fence) { + /* successfully found backing hw-fence*/ + *is_signaled = false; + return hw_fence; + } + if (dma_fence_is_signaled(fence)) { + /* signaled dma-fence may have been removed from table */ + *is_signaled = true; + return create ? _create_signaled_hw_fence(drv_data, client_id, fence, hash) + : NULL; + } + } + + /* + * The dma-fence signal callback holds a hw-fence refcount until dma-fence signal. 
If we hit + * this condition (unable to find unsignaled dma-fence with HW Fencing enabled), then the + * hw-fence has been incorrectly released early by someone who did not own the reference. + */ + HWFNC_ERR("Can't find backing hwfence for dma-fence client:%u ctx:%llu seq:%llu f:0x%lx\n", + client_id, fence->context, fence->seqno, fence->flags); + *is_signaled = false; + return NULL; +} + +void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client) +{ + struct msm_hw_fence_queue *queue; + u32 rd_idx, wr_idx, lock_idx; + u32 *rd_idx_ptr, *wr_idx_ptr, *tx_wm_ptr; + + queue = &hw_fence_client->queues[HW_FENCE_TX_QUEUE - 1]; + hw_fence_get_queue_idx_ptrs(drv_data, queue->va_header, &rd_idx_ptr, &wr_idx_ptr, + &tx_wm_ptr); + + /* For the client TxQ: set the read-index same as last write that was done by the client */ + mb(); /* make sure data is ready before read */ + wr_idx = readl_relaxed(wr_idx_ptr); + if (queue->skip_wr_idx) + *tx_wm_ptr = wr_idx; + writel_relaxed(wr_idx, rd_idx_ptr); + wmb(); /* make sure data is updated after write the index*/ + HWFNC_DBG_Q("update tx queue %s to match write_index:%u\n", + queue->skip_wr_idx ? "read_index=tx_wm" : "read_index", wr_idx); + + /* For the client RxQ: set the write-index same as last read done by the client */ + if (hw_fence_client->update_rxq) { + lock_idx = (hw_fence_client->client_id - 1) * HW_FENCE_LOCK_IDX_OFFSET; + + if (lock_idx >= drv_data->client_lock_tbl_cnt) { + HWFNC_ERR("can't reset rxq, lock for client:%d lock_idx:%d exceed max:%d\n", + hw_fence_client->client_id, lock_idx, + drv_data->client_lock_tbl_cnt); + return; + } + HWFNC_DBG_Q("Locking client id:%d: idx:%d\n", hw_fence_client->client_id, lock_idx); + + /* lock the client rx queue to update */ + GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 1); + + queue = &hw_fence_client->queues[HW_FENCE_RX_QUEUE - 1]; + hw_fence_get_queue_idx_ptrs(drv_data, queue->va_header, &rd_idx_ptr, &wr_idx_ptr, + &tx_wm_ptr); + + mb(); /* make sure data is ready before read */ + rd_idx = readl_relaxed(rd_idx_ptr); + writel_relaxed(rd_idx, wr_idx_ptr); + wmb(); /* make sure data is updated after write the index */ + + /* unlock */ + GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 0); + HWFNC_DBG_Q("update rx queue write_index to match read_index:%u\n", rd_idx); + } +} + +int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash, + u32 reset_flags) +{ + int ret = 0; + int error = (reset_flags & MSM_HW_FENCE_RESET_WITHOUT_ERROR) ? 
0 : MSM_HW_FENCE_ERROR_RESET; + + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */ + if (hw_fence->wait_client_mask & BIT(hw_fence_client->client_id)) { + HWFNC_DBG_H("clearing client:%d wait bit for fence: ctx:%llu seqno:%llu\n", + hw_fence_client->client_id, hw_fence->ctx_id, + hw_fence->seq_id); + hw_fence->wait_client_mask &= ~BIT(hw_fence_client->client_id); + + /* remove reference held by waiting client */ + if (!(reset_flags & MSM_HW_FENCE_RESET_WITHOUT_DESTROY)) { + hw_fence_put_and_unlock(drv_data, hw_fence_client->client_id, hw_fence, + hash); + return 0; + } + } + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */ + + if (hw_fence->fence_allocator == hw_fence_client->client_id) { + + /* if fence is not signaled, signal with error all the waiting clients */ + _signal_fence_if_unsignaled(drv_data, hw_fence, hash, error, true); + + if (reset_flags & MSM_HW_FENCE_RESET_WITHOUT_DESTROY) + goto skip_destroy; + + ret = hw_fence_destroy_with_hash(drv_data, hw_fence_client, hash); + if (ret) { + HWFNC_ERR("Error destroying HW fence: hash:%llu\n", hash); + } + } + +skip_destroy: + return ret; +} + +enum hw_fence_client_data_id hw_fence_get_client_data_id(enum hw_fence_client_id client_id) +{ + enum hw_fence_client_data_id data_id; + + switch (client_id) { + case HW_FENCE_CLIENT_ID_CTX0: + data_id = HW_FENCE_CLIENT_DATA_ID_CTX0; + break; + default: + data_id = HW_FENCE_MAX_CLIENTS_WITH_DATA; + break; + } + + return data_id; +} + +int hw_fence_signal_fence(struct hw_fence_driver_data *drv_data, struct dma_fence *fence, u64 hash, + u32 error, bool release_ref) +{ + struct msm_hw_fence *hw_fence; + + if (!drv_data) { + HWFNC_ERR("bad drv_data\n"); + return -EINVAL; + } + + hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, drv_data->hw_fences_tbl, hash); + if (!hw_fence) { + HWFNC_ERR("bad hw fence hash:%llu\n", hash); + return -EINVAL; + } + + if (fence && (hw_fence->ctx_id != fence->context || hw_fence->seq_id != fence->seqno)) { + HWFNC_ERR("invalid hfence hash:%llu ctx:%llu seq:%llu expected ctx:%llu seq:%llu\n", + hash, hw_fence->ctx_id, hw_fence->seq_id, fence->context, fence->seqno); + return -EINVAL; + } + + /* if unsignaled, signal but do not release ref held by FCTL */ + _signal_fence_if_unsignaled(drv_data, hw_fence, hash, error, release_ref); + + return 0; +} + +static void msm_hw_fence_signal_callback(struct dma_fence *fence, struct dma_fence_cb *cb) +{ + struct hw_fence_signal_cb *signal_cb; + int ret = 0; + + if (!fence || !cb) { + HWFNC_ERR("Invalid params fence:0x%pK cb:0x%pK\n", fence, cb); + return; + } + + HWFNC_DBG_IRQ("dma-fence signal callback ctx:%llu seqno:%llu flags:%lx err:%d\n", + fence->context, fence->seqno, fence->flags, fence->error); + + signal_cb = (struct hw_fence_signal_cb *)cb; + ret = hw_fence_signal_fence(signal_cb->drv_data, fence, signal_cb->hash, fence->error, + false); + if (ret) + HWFNC_ERR("failed to signal fence ctx:%llu seq:%llu hash:%llu err:%u\n", + fence->context, fence->seqno, signal_cb->hash, fence->error); + else + /* release ref held by dma-fence signal */ + hw_fence_destroy_refcount(signal_cb->drv_data, signal_cb->hash, + HW_FENCE_DMA_FENCE_REFCOUNT); + + kfree(signal_cb); +} + +int hw_fence_add_callback(struct hw_fence_driver_data *drv_data, struct dma_fence *fence, u64 hash) +{ + struct hw_fence_signal_cb *signal_cb; + struct msm_hw_fence *hw_fence; + int ret; + + hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, drv_data->hw_fences_tbl, hash); + if (!hw_fence) { + HWFNC_ERR("Failed to find 
hw-fence for hash:%llu\n", hash); + return -EINVAL; + } + + signal_cb = kzalloc(sizeof(*signal_cb), GFP_ATOMIC); + if (!signal_cb) + return -ENOMEM; + + signal_cb->drv_data = drv_data; + signal_cb->hash = hash; + + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); + hw_fence->refcount |= HW_FENCE_DMA_FENCE_REFCOUNT; + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); + + ret = dma_fence_add_callback(fence, &signal_cb->fence_cb, msm_hw_fence_signal_callback); + if (ret) { + if (dma_fence_is_signaled(fence)) { + HWFNC_DBG_IRQ("dma_fence is signaled ctx:%llu seq:%llu flags:%lx err:%d\n", + fence->context, fence->seqno, fence->flags, fence->error); + msm_hw_fence_signal_callback(fence, &signal_cb->fence_cb); + ret = 0; + } else { + HWFNC_ERR("failed to add signal_cb ctx:%llu seq:%llu f:%lx err:%d ret:%d\n", + fence->context, fence->seqno, fence->flags, fence->error, ret); + /* release ref held by dma-fence signal */ + hw_fence_destroy_refcount(signal_cb->drv_data, signal_cb->hash, + HW_FENCE_DMA_FENCE_REFCOUNT); + kfree(signal_cb); + } + } + + return ret; +} + +int hw_fence_get_flags_error(struct hw_fence_driver_data *drv_data, u64 hash, u64 *flags, + u32 *error) +{ + struct msm_hw_fence *hw_fence; + + if (!drv_data) { + HWFNC_ERR("invalid drv_data\n"); + return -EINVAL; + } + + hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, drv_data->hw_fences_tbl, hash); + if (!hw_fence) { + HWFNC_ERR("Failed to get hw-fence for hash:%llu\n", hash); + return -EINVAL; + } + *flags = hw_fence->flags; + *error = hw_fence->error; + + return 0; +} + +int hw_fence_update_hsynx(struct hw_fence_driver_data *drv_data, u64 hash, u32 h_synx, + bool wait_for) +{ + struct msm_hw_fence *hw_fence; + int ret = 0; + + hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, drv_data->hw_fences_tbl, hash); + if (!hw_fence) { + HWFNC_ERR("Failed to get hw-fence for hash:%llu\n", hash); + return -EINVAL; + } + + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */ + if (hw_fence->h_synx && hw_fence->h_synx != h_synx) { + ret = -EINVAL; + goto error; + } + hw_fence->h_synx = h_synx; + if (wait_for) + hw_fence->fence_wait_time = hw_fence_get_qtime(drv_data); +error: + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */ + + wmb(); /* update table */ + + if (ret) + HWFNC_ERR("setting h_synx:%u for hw-fence hash:%llu with existing h_synx:%u\n", + h_synx, hash, hw_fence->h_synx); + + return ret; +} + +int hw_fence_check_hw_fence_driver(struct hw_fence_driver_data *drv_data) +{ + if (IS_ERR_OR_NULL(drv_data) || !drv_data->resources_ready) { + HWFNC_ERR("hw fence driver not ready\n"); + return -EINVAL; + } + + return 0; +} + +int hw_fence_check_valid_client(struct hw_fence_driver_data *drv_data, void *client_handle) +{ + int ret; + + ret = hw_fence_check_hw_fence_driver(drv_data); + if (ret) + return ret; + + if (IS_ERR_OR_NULL(client_handle)) { + HWFNC_ERR("Invalid client\n"); + return -EINVAL; + } + + return 0; +} + +int hw_fence_check_valid_fctl(struct hw_fence_driver_data *drv_data, void *client_handle) +{ + int ret; + + ret = hw_fence_check_valid_client(drv_data, client_handle); + if (ret) + return ret; + + if (!drv_data->fctl_ready) { + HWFNC_ERR("fctl in invalid state, cannot perform operation\n"); + return -EAGAIN; + } + + return 0; +} + +/* unlock the in-flight hw-fence and any locks taken on client rx queue for handling */ +static void unlock_in_flight_fence(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence *hw_fence, u64 hash, u64 in_flight_lock) +{ + u64 wait_client_mask; + 
u32 wait_client_id, lock_idx; + + HWFNC_DBG_SSR("unlock in-flight fence locked as 0x%llx\n", hw_fence->lock); + hw_fence_debug_dump_fence(HW_FENCE_SSR, hw_fence, hash, 0); + wait_client_mask = hw_fence->wait_client_mask; + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); + + for (wait_client_id = 0; wait_client_id <= drv_data->rxq_clients_num; wait_client_id++) { + if (wait_client_mask & BIT(wait_client_id)) { + lock_idx = (wait_client_id - 1) * HW_FENCE_LOCK_IDX_OFFSET; + if (drv_data->client_lock_tbl[lock_idx] == in_flight_lock) { + GLOBAL_ATOMIC_STORE(drv_data, + &drv_data->client_lock_tbl[lock_idx], 0); + HWFNC_DBG_SSR("unlock client rxq id:%d locked as 0x%llx\n", + wait_client_id, in_flight_lock); + } + } + } +} + +int hw_fence_ssr_cleanup_table(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence *hw_fences_tbl, u32 table_total_entries, u64 in_flight_lock) +{ + struct msm_hw_fence *hw_fence; + int i; + + if (!drv_data || !hw_fences_tbl || !in_flight_lock || in_flight_lock == BIT(0)) { + HWFNC_ERR("invalid params drv_data:0x%pK table:0x%pK in_flight_lock:0x%llx", + drv_data, hw_fences_tbl, in_flight_lock); + return -EINVAL; + } + + for (i = 0; i < table_total_entries; i++) { + hw_fence = _get_hw_fence(table_total_entries, hw_fences_tbl, i); + + if (hw_fence->lock == in_flight_lock) { + /* only one fence should be affected by this */ + unlock_in_flight_fence(drv_data, hw_fence, i, in_flight_lock); + } + _signal_fence_if_unsignaled(drv_data, hw_fence, i, MSM_HW_FENCE_ERROR_RESET, false); + } + + return 0; +} diff --git a/qcom/opensource/mm-drivers/hw_fence/src/hw_fence_drv_utils.c b/qcom/opensource/mm-drivers/hw_fence/src/hw_fence_drv_utils.c new file mode 100644 index 0000000000..42148cbeae --- /dev/null +++ b/qcom/opensource/mm-drivers/hw_fence/src/hw_fence_drv_utils.c @@ -0,0 +1,1714 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#if (KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE) +#include +#else +#include +#endif +#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) +#include +#endif +#include +#include +#include +#if (KERNEL_VERSION(6, 1, 25) <= LINUX_VERSION_CODE) +#include +#endif +#include +#include + +#include "hw_fence_drv_priv.h" +#include "hw_fence_drv_utils.h" +#include "hw_fence_drv_ipc.h" +#include "hw_fence_drv_debug.h" + +/** + * MAX_CLIENT_QUEUE_MEM_SIZE: + * Maximum memory size for client queues of a hw fence client. 
+ */ +#define MAX_CLIENT_QUEUE_MEM_SIZE 0x100000 + +/** + * HW_FENCE_MAX_CLIENT_TYPE: + * Total number of client types with and without configurable number of sub-clients + */ +#define HW_FENCE_MAX_CLIENT_TYPE (HW_FENCE_MAX_CLIENT_TYPE_STATIC + \ + HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE) + +/** + * HW_FENCE_MIN_RXQ_CLIENTS: + * Minimum number of static hw fence clients with rxq + */ +#define HW_FENCE_MIN_RXQ_CLIENTS HW_FENCE_CLIENT_ID_VAL6 + +/** + * HW_FENCE_MIN_RXQ_CLIENT_TYPE: + * Minimum number of static hw fence client types with rxq (GFX, DPU, VAL) + */ +#define HW_FENCE_MIN_RXQ_CLIENT_TYPE 3 + +/* Maximum number of clients for each client type */ +#define HW_FENCE_CLIENT_TYPE_MAX_GPU 1 +#define HW_FENCE_CLIENT_TYPE_MAX_DPU 6 +#define HW_FENCE_CLIENT_TYPE_MAX_VAL 7 +#define HW_FENCE_CLIENT_TYPE_MAX_IPE 32 +#define HW_FENCE_CLIENT_TYPE_MAX_VPU 32 +#define HW_FENCE_CLIENT_TYPE_MAX_IFE 32 +#define HW_FENCE_CLIENT_TYPE_MAX_IPA 32 + +/** + * HW_FENCE_CLIENT_ID_CTRL_QUEUE: + * Bit set in signaled clients mask if hw fence driver should read ctrl rx queue + */ +#define HW_FENCE_CLIENT_ID_CTRL_QUEUE 0 + +/** + * HW_FENCE_SIGNALED_CLIENTS_LAST: + * Last signaled clients id for which HW Fence Driver can receive doorbell + */ +#if IS_ENABLED(CONFIG_DEBUG_FS) +#define HW_FENCE_SIGNALED_CLIENTS_LAST HW_FENCE_CLIENT_ID_VAL6 +#else +#define HW_FENCE_SIGNALED_CLIENTS_LAST HW_FENCE_CLIENT_ID_CTRL_QUEUE +#endif /* CONFIG_DEBUG_FS */ + +/** + * HW_FENCE_ALL_SIGNALED_CLIENTS_MASK: + * Each bit in this mask represents possible signaled client ids for which hw fence driver can + * receive + */ +#define HW_FENCE_ALL_SIGNALED_CLIENTS_MASK \ + GENMASK(HW_FENCE_SIGNALED_CLIENTS_LAST, HW_FENCE_CLIENT_ID_CTRL_QUEUE) + +/** + * HW_FENCE_MAX_ITER_READ: + * Maximum number of iterations when reading queue + */ +#define HW_FENCE_MAX_ITER_READ 100 + +/** + * HW_FENCE_SOCCP_INIT_TIMEOUT_MS: + * Timeout in ms for hw-fence driver delay of ssr callback while waiting for soccp response message + */ +#define HW_FENCE_SOCCP_INIT_TIMEOUT_MS 50 + +/** + * HW_FENCE_FCTL_LOCK_VALUE: + * Fence controller sets the hw-fence lock value to this when locking a given fence. + */ +#define HW_FENCE_FCTL_LOCK_VALUE BIT(1) + +/** + * HW_FENCE_MAX_EVENTS: + * Maximum number of HW Fence debug events + */ +#define HW_FENCE_MAX_EVENTS 1000 + +/** + * DT_PROPS_CLIENT_NAME_SIZE: + * Maximum number of characters in client name used in device-tree properties + */ +#define DT_PROPS_CLIENT_NAME_SIZE 10 + +/** + * DT_PROPS_CLIENT_PROPS_SIZE: + * Maximum number of characters in property name for base client queue properties. + */ +#define DT_PROPS_CLIENT_PROPS_SIZE (DT_PROPS_CLIENT_NAME_SIZE + 27) + +/** + * DT_PROPS_CLIENT_EXTRA_PROPS_SIZE: + * Maximum number of characters in property name for extra client queue properties. + */ +#define DT_PROPS_CLIENT_EXTRA_PROPS_SIZE (DT_PROPS_CLIENT_NAME_SIZE + 33) + +/** + * struct hw_fence_client_types - Table describing all supported client types, used to parse + * device-tree properties related to client queue size. + * + * The fields name, init_id, and max_clients_num are constants. Default values for clients_num, + * queues_num, and skip_txq_wr_idx are provided in this table, and clients_num, queues_num, + * queue_entries, and skip_txq_wr_idx can be read from device-tree. + * + * If a value for queue entries is not parsed for the client type, then the default number of client + * queue entries (parsed from device-tree) is used. + * + * Notes: + * 1. 
Client types must be in the same order as client_ids within the enum 'hw_fence_client_id'. + * 2. Each HW Fence client ID must be described by one of the client types in this table. + * 3. A new client type must set: name, init_id, max_clients_num, clients_num, queues_num, and + * skip_txq_wr_idx. + * 4. Either constant HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE or HW_FENCE_MAX_CLIENT_TYPE_STATIC must + * be incremented as appropriate for new client types. + */ +struct hw_fence_client_type_desc hw_fence_client_types[HW_FENCE_MAX_CLIENT_TYPE] = { + {"gpu", HW_FENCE_CLIENT_ID_CTX0, HW_FENCE_CLIENT_TYPE_MAX_GPU, HW_FENCE_CLIENT_TYPE_MAX_GPU, + HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false, false}, + {"dpu", HW_FENCE_CLIENT_ID_CTL0, HW_FENCE_CLIENT_TYPE_MAX_DPU, HW_FENCE_CLIENT_TYPE_MAX_DPU, + HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false, false}, + {"val", HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_TYPE_MAX_VAL, HW_FENCE_CLIENT_TYPE_MAX_VAL, + HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false, false}, + {"ipe", HW_FENCE_CLIENT_ID_IPE, HW_FENCE_CLIENT_TYPE_MAX_IPE, 0, HW_FENCE_CLIENT_QUEUES, + 0, 0, 0, 0, 0, 0, false, false}, + {"vpu", HW_FENCE_CLIENT_ID_VPU, HW_FENCE_CLIENT_TYPE_MAX_VPU, 0, HW_FENCE_CLIENT_QUEUES, + 0, 0, 0, 0, 0, 0, false, false}, + {"ipa", HW_FENCE_CLIENT_ID_IPA, HW_FENCE_CLIENT_TYPE_MAX_IPA, 0, 1, 0, 0, 0, 0, 0, 0, + false, false}, + {"ife0", HW_FENCE_CLIENT_ID_IFE0, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true, false}, + {"ife1", HW_FENCE_CLIENT_ID_IFE1, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true, false}, + {"ife2", HW_FENCE_CLIENT_ID_IFE2, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true, false}, + {"ife3", HW_FENCE_CLIENT_ID_IFE3, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true, false}, + {"ife4", HW_FENCE_CLIENT_ID_IFE4, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true, false}, + {"ife5", HW_FENCE_CLIENT_ID_IFE5, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true, false}, + {"ife6", HW_FENCE_CLIENT_ID_IFE6, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true, false}, + {"ife7", HW_FENCE_CLIENT_ID_IFE7, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true, false}, + {"ife8", HW_FENCE_CLIENT_ID_IFE8, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true, false}, + {"ife9", HW_FENCE_CLIENT_ID_IFE9, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true, false}, + {"ife10", HW_FENCE_CLIENT_ID_IFE10, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true, false}, + {"ife11", HW_FENCE_CLIENT_ID_IFE11, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true, false}, +}; + +#define hw_fence_wait_event_timeout(waitq, cond, timeout_ms, ret) \ + do { \ + ktime_t cur_ktime; \ + ktime_t exp_ktime; \ + s64 wait_time_jiffies = msecs_to_jiffies(timeout_ms); \ +\ + exp_ktime = ktime_add_ms(ktime_get(), timeout_ms); \ + do { \ + ret = wait_event_timeout(waitq, cond, \ + wait_time_jiffies); \ + cur_ktime = ktime_get(); \ + } while ((!cond) && (ret == 0) && \ + (ktime_compare(ktime_sub(exp_ktime, cur_ktime), ktime_set(0, 0)) > 0));\ + } while (0) + +static void _lock(uint64_t *wait) +{ +#if defined(__aarch64__) + __asm__( + // Sequence to wait for lock to be free (i.e. 
zero) + "PRFM PSTL1KEEP, [%x[i_lock]]\n\t" + "1:\n\t" + "LDAXR W5, [%x[i_lock]]\n\t" + "CBNZ W5, 1b\n\t" + // Sequence to set PVM BIT0 + "LDR W7, =0x1\n\t" // Load BIT0 (0x1) into W7 + "STXR W5, W7, [%x[i_lock]]\n\t" // Atomic Store exclusive BIT0 (lock = 0x1) + "CBNZ W5, 1b\n\t" // If cannot set it, goto 1 + : + : [i_lock] "r" (wait) + : "memory"); +#elif + HWFNC_ERR("cannot lock\n"); +#endif +} + +static void _unlock_vm(struct hw_fence_driver_data *drv_data, uint64_t *lock) +{ + uint64_t lock_val; + +#if defined(__aarch64__) + __asm__( + // Sequence to clear PVM BIT0 + "2:\n\t" + "LDAXR W5, [%x[i_out]]\n\t" // Atomic Fetch Lock + "AND W6, W5, #0xFFFFFFFFFFFFFFFE\n\t" // AND to clear BIT0 (lock &= ~0x1)) + "STXR W5, W6, [%x[i_out]]\n\t" // Store exclusive result + "CBNZ W5, 2b\n\t" // If cannot store exclusive, goto 2 + : + : [i_out] "r" (lock) + : "memory"); +#elif + HWFNC_ERR("cannot unlock\n"); +#endif + mb(); /* Make sure the memory is updated */ + + lock_val = *lock; /* Read the lock value */ + HWFNC_DBG_LOCK("unlock: lock_val after:0x%llx\n", lock_val); + if (lock_val & HW_FENCE_FCTL_LOCK_VALUE) { /* check if SVM BIT1 is set*/ + /* + * SVM is in WFI state, since SVM acquire bit is set + * Trigger IRQ to Wake-Up SVM Client + */ +#if IS_ENABLED(CONFIG_DEBUG_FS) + drv_data->debugfs_data.lock_wake_cnt++; + HWFNC_DBG_LOCK("triggering ipc to unblock SVM lock_val:%llu cnt:%llu\n", lock_val, + drv_data->debugfs_data.lock_wake_cnt); +#endif + hw_fence_ipcc_trigger_signal(drv_data, + drv_data->ipcc_client_pid, + drv_data->ipcc_fctl_vid, 30); /* Trigger APPS Signal 30 */ + } +} + +static void _unlock_soccp(uint64_t *lock) +{ + /* Signal Client */ +#if defined(__aarch64__) + __asm__("STLR WZR, [%x[i_out]]\n\t" + "SEV\n" + : + : [i_out] "r" (lock) + : "memory"); +#elif + HWFNC_ERR("cannot unlock\n"); +#endif +} + +void global_atomic_store(struct hw_fence_driver_data *drv_data, uint64_t *lock, bool val) +{ + if (val) { + preempt_disable(); + _lock(lock); + } else { + if (drv_data->has_soccp) + _unlock_soccp(lock); + else + _unlock_vm(drv_data, lock); + preempt_enable(); + } +} + +int hw_fence_utils_fence_error_cb(struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, + u64 seqno, u64 hash, u64 flags, u32 error) +{ + struct msm_hw_fence_cb_data cb_data; + struct dma_fence fence; + int ret = 0; + + if (IS_ERR_OR_NULL(hw_fence_client)) { + HWFNC_ERR("Invalid client:0x%pK\n", hw_fence_client); + return -EINVAL; + } + + mutex_lock(&hw_fence_client->error_cb_lock); + if (!error || !hw_fence_client->fence_error_cb) { + HWFNC_ERR("Invalid error:%d fence_error_cb:0x%pK\n", error, + hw_fence_client->fence_error_cb); + ret = -EINVAL; + goto exit; + } + + /* initialize cb_data info */ + fence.context = ctxt_id; + fence.seqno = seqno; + fence.flags = flags; + fence.error = error; + cb_data.fence = &fence; + cb_data.data = hw_fence_client->fence_error_cb_userdata; + + HWFNC_DBG_L("invoking cb for client:%d ctx:%llu seq:%llu flags:%llu e:%u data:0x%pK\n", + hw_fence_client->client_id, ctxt_id, seqno, flags, error, + hw_fence_client->fence_error_cb_userdata); + + hw_fence_client->fence_error_cb(hash, error, &cb_data); + +exit: + mutex_unlock(&hw_fence_client->error_cb_lock); + + return ret; +} + +static int _process_fence_error_payload(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_queue_payload *payload) +{ + struct msm_hw_fence_client *hw_fence_client; + u32 client_id; + int ret; + + if (!drv_data || !payload || payload->type != HW_FENCE_PAYLOAD_TYPE_2) { + HWFNC_ERR("invalid drv_data:0x%pK 
payload:0x%pK type:%d expected type:%d\n", + drv_data, payload, payload ? payload->type : -1, HW_FENCE_PAYLOAD_TYPE_2); + return -EINVAL; + } + + if (payload->client_data < HW_FENCE_CLIENT_ID_CTX0 || + payload->client_data >= drv_data->clients_num) { + HWFNC_ERR("read invalid client_id:%llu from ctrl rxq min:%u max:%u\n", + payload->client_data, HW_FENCE_CLIENT_ID_CTX0, + drv_data->clients_num); + return -EINVAL; + } + + client_id = payload->client_data; + HWFNC_DBG_Q("ctrl rxq rd: h:%llu ctx:%llu seq:%llu f:%llu e:%u client:%u\n", payload->hash, + payload->ctxt_id, payload->seqno, payload->flags, payload->error, client_id); + + hw_fence_client = drv_data->clients[client_id]; + if (!hw_fence_client) { + HWFNC_ERR("processing fence error cb for unregistered client_id:%u\n", + client_id); + return -EINVAL; + } + + ret = hw_fence_utils_fence_error_cb(hw_fence_client, payload->ctxt_id, + payload->seqno, payload->hash, payload->flags, payload->error); + if (ret) + HWFNC_ERR("fence_error_cb failed for client:%u ctx:%llu seq:%llu err:%u\n", + client_id, payload->ctxt_id, payload->seqno, payload->error); + + return ret; +} + +static int _process_init_soccp_payload(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_queue_payload *payload) +{ + struct hw_fence_soccp *soccp_props; + + if (!drv_data || !drv_data->has_soccp || !payload || + !(payload->type == HW_FENCE_PAYLOAD_TYPE_3 || + payload->type == HW_FENCE_PAYLOAD_TYPE_4)) { + HWFNC_ERR("invalid drv_data:0x%pK has_soccp:%d payload:0x%pK type:%d expected:%d\n", + drv_data, drv_data ? drv_data->has_soccp : -1, payload, + payload ? payload->type : -1, HW_FENCE_PAYLOAD_TYPE_3); + return -EINVAL; + } + + soccp_props = &drv_data->soccp_props; + if (payload->type == HW_FENCE_PAYLOAD_TYPE_4 && !soccp_props->ssr_cnt) { + HWFNC_ERR("incorrectly received type:%d when ssr error is not happening\n", + payload->type); + return -EINVAL; + } + + HWFNC_DBG_INIT("Received ctrlq msg type:%d that soccp is initialized\n", payload->type); + drv_data->fctl_ready = true; + wake_up_all(&soccp_props->ssr_wait_queue); + + return 0; +} + +static int _process_ctrl_rx_queue(struct hw_fence_driver_data *drv_data) +{ + struct msm_hw_fence_queue_payload payload; + int i, ret = 0, read = 1; + + for (i = 0; read && i < HW_FENCE_MAX_ITER_READ; i++) { + read = hw_fence_read_queue_helper(drv_data, + &drv_data->ctrl_queues[HW_FENCE_RX_QUEUE - 1], &payload); + if (read < 0) { + HWFNC_DBG_Q("unable to read ctrl rxq\n"); + return 0; + } + switch (payload.type) { + case HW_FENCE_PAYLOAD_TYPE_2: + ret = _process_fence_error_payload(drv_data, &payload); + break; + case HW_FENCE_PAYLOAD_TYPE_3: + case HW_FENCE_PAYLOAD_TYPE_4: + ret = _process_init_soccp_payload(drv_data, &payload); + break; + default: + HWFNC_ERR("received unexpected ctrl queue payload type:%d\n", payload.type); + ret = -EINVAL; + break; + } + } + + return ret; +} + +static int _process_signaled_client_id(struct hw_fence_driver_data *drv_data, int client_id) +{ + int ret; + + HWFNC_DBG_H("Processing signaled client mask id:%d\n", client_id); + switch (client_id) { + case HW_FENCE_CLIENT_ID_CTRL_QUEUE: + ret = _process_ctrl_rx_queue(drv_data); + break; +#if IS_ENABLED(CONFIG_DEBUG_FS) + case HW_FENCE_CLIENT_ID_VAL0: + case HW_FENCE_CLIENT_ID_VAL1: + case HW_FENCE_CLIENT_ID_VAL2: + case HW_FENCE_CLIENT_ID_VAL3: + case HW_FENCE_CLIENT_ID_VAL4: + case HW_FENCE_CLIENT_ID_VAL5: + case HW_FENCE_CLIENT_ID_VAL6: + ret = process_validation_client_loopback(drv_data, client_id); + break; +#endif /* CONFIG_DEBUG_FS */ + 
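/* only the ctrl queue bit (and the VAL client bits under debugfs) have handlers; any other id is unexpected */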
default: + HWFNC_ERR("unknown mask id:%d\n", client_id); + ret = -EINVAL; + } + + return ret; +} + +void hw_fence_utils_process_signaled_clients_mask(struct hw_fence_driver_data *drv_data, + u64 signaled_clients_mask) +{ + int signaled_client_id; + u64 mask; + + for (signaled_client_id = HW_FENCE_CLIENT_ID_CTRL_QUEUE; + signaled_client_id <= HW_FENCE_SIGNALED_CLIENTS_LAST; + signaled_client_id++) { + mask = 1 << signaled_client_id; + if (mask & signaled_clients_mask) { + HWFNC_DBG_H("received signaled_client:%d mask:0x%llx\n", signaled_client_id, + signaled_clients_mask); + + if (_process_signaled_client_id(drv_data, signaled_client_id)) + HWFNC_ERR("Failed to process signaled_client:%d\n", + signaled_client_id); + + /* clear mask for this flag id if nothing else pending finish */ + signaled_clients_mask = signaled_clients_mask & ~(mask); + HWFNC_DBG_H("signaled_client:%d cleared flags:0x%llx mask:0x%llx\n", + signaled_client_id, signaled_clients_mask, mask); + if (!signaled_clients_mask) + break; + } + } +} + +/* doorbell callback */ +static void _hw_fence_cb(int irq, void *data) +{ + struct hw_fence_driver_data *drv_data = (struct hw_fence_driver_data *)data; + gh_dbl_flags_t clear_flags = HW_FENCE_ALL_SIGNALED_CLIENTS_MASK; + int ret; + + if (!drv_data) + return; + + ret = gh_dbl_read_and_clean(drv_data->rx_dbl, &clear_flags, 0); + if (ret) { + HWFNC_ERR("hw_fence db callback, retrieve flags fail ret:%d\n", ret); + return; + } + + HWFNC_DBG_IRQ("db callback label:%d irq:%d flags:0x%llx qtime:%llu\n", drv_data->db_label, + irq, clear_flags, hw_fence_get_qtime(drv_data)); + + hw_fence_utils_process_signaled_clients_mask(drv_data, clear_flags); +} + +int hw_fence_utils_init_virq(struct hw_fence_driver_data *drv_data) +{ + struct device_node *node = drv_data->dev->of_node; + struct device_node *node_compat; + const char *compat = "qcom,msm-hw-fence-db"; + int ret; + + node_compat = of_find_compatible_node(node, NULL, compat); + if (!node_compat) { + HWFNC_ERR("Failed to find dev node with compat:%s\n", compat); + return -EINVAL; + } + + ret = of_property_read_u32(node_compat, "gunyah-label", &drv_data->db_label); + if (ret) { + HWFNC_ERR("failed to find label info %d\n", ret); + return ret; + } + + HWFNC_DBG_IRQ("registering doorbell db_label:%d\n", drv_data->db_label); + drv_data->rx_dbl = gh_dbl_rx_register(drv_data->db_label, _hw_fence_cb, drv_data); + if (IS_ERR_OR_NULL(drv_data->rx_dbl)) { + ret = PTR_ERR(drv_data->rx_dbl); + HWFNC_ERR("Failed to register doorbell\n"); + return ret; + } + + return 0; +} + +static irqreturn_t hw_fence_soccp_irq_handler(int irq, void *data) +{ + struct hw_fence_driver_data *drv_data = (struct hw_fence_driver_data *)data; + u32 mask; + + mask = hw_fence_ipcc_get_signaled_clients_mask(drv_data); + atomic_or(mask, &drv_data->signaled_clients_mask); + wake_up_all(&drv_data->soccp_wait_queue); + + return IRQ_HANDLED; +} + +static int hw_fence_soccp_listener(void *data) +{ + struct hw_fence_driver_data *drv_data = (struct hw_fence_driver_data *)data; + u32 mask; + + while (drv_data->has_soccp) { + wait_event(drv_data->soccp_wait_queue, + atomic_read(&drv_data->signaled_clients_mask) != 0); + mask = atomic_xchg(&drv_data->signaled_clients_mask, 0); + if (mask) + hw_fence_utils_process_signaled_clients_mask(drv_data, mask); + } + + return 0; +} + +static int _send_bootup_ctrl_txq_msg(struct hw_fence_driver_data *drv_data, u32 payload_type) +{ + struct msm_hw_fence_queue *queue; + int ret; + + if (drv_data->fctl_ready) + return 0; + + ret = 
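/* hold a power vote so soccp stays awake while the ctrl txq message is sent; the vote is dropped further below */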
hw_fence_utils_set_power_vote(drv_data, true); + if (ret) { + HWFNC_ERR("failed to set power vote to send ctrlq message ret:%d\n", ret); + return -EINVAL; + } + + /* soccp may fail to wake up during hw-fence driver probe */ + if (!drv_data->soccp_props.is_awake) { + HWFNC_DBG_INFO("rproc_set_state call failed to wake up soccp\n"); + ret = hw_fence_utils_set_power_vote(drv_data, false); + if (ret) + HWFNC_ERR("failed to remove power vote for ctrlq msg ret:%d\n", ret); + + return -EINVAL; + } + + queue = &drv_data->ctrl_queues[HW_FENCE_TX_QUEUE - 1]; + ret = hw_fence_update_queue_helper(drv_data, 0, queue, payload_type, 0, 0, 0, + 0, 0, 0, HW_FENCE_TX_QUEUE - 1); + if (ret) { + HWFNC_ERR("unable to update ctrl txq message\n"); + return ret; + } + + hw_fence_ipcc_trigger_signal(drv_data, drv_data->ipcc_client_pid, drv_data->ipcc_fctl_vid, + hw_fence_ipcc_get_signal_id(drv_data, 0)); + + /* wait for communication back from soccp with timeout */ + hw_fence_wait_event_timeout(drv_data->soccp_props.ssr_wait_queue, drv_data->fctl_ready, + HW_FENCE_SOCCP_INIT_TIMEOUT_MS, ret); + + ret = hw_fence_utils_set_power_vote(drv_data, false); + if (ret) + HWFNC_ERR("failed to remove power vote for ctrlq msg ret:%d\n", ret); + + if (!drv_data->fctl_ready) { + HWFNC_ERR("failed to receive ctrlq message for bootup event ret:%d\n", ret); + ret = -EINVAL; + } + + return ret; +} + +int hw_fence_utils_init_soccp_irq(struct hw_fence_driver_data *drv_data) +{ + struct platform_device *pdev; + struct task_struct *thread; + int irq, ret; + + if (!drv_data || !drv_data->dev || !drv_data->has_soccp) { + HWFNC_ERR("invalid drv_data:0x%pK dev:0x%pK has_soccp:%d\n", drv_data, + drv_data ? drv_data->dev : NULL, drv_data ? drv_data->has_soccp : -1); + return -EINVAL; + } + + init_waitqueue_head(&drv_data->soccp_wait_queue); + + pdev = to_platform_device(drv_data->dev); + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + HWFNC_ERR("failed to get the irq\n"); + return irq; + } + HWFNC_DBG_INIT("Registering irq:%d\n", irq); + + ret = devm_request_irq(drv_data->dev, irq, hw_fence_soccp_irq_handler, IRQF_TRIGGER_HIGH, + "hwfence-driver", drv_data); + if (ret < 0) { + HWFNC_ERR("failed to register irq:%d ret:%d\n", irq, ret); + return ret; + } + + thread = kthread_run(hw_fence_soccp_listener, (void *)drv_data, + "msm_hw_fence_soccp_listener"); + if (IS_ERR(thread)) { + HWFNC_ERR("failed to create thread to process signals received from soccp\n"); + return PTR_ERR(thread); + } + drv_data->soccp_listener_thread = thread; + + return ret; +} + +#if (KERNEL_VERSION(6, 1, 25) <= LINUX_VERSION_CODE) +/* + * This is called to set soccp power vote based off internal counter of soccp power votes. 
+ * This must be called with rproc_lock held + */ +static int _set_intended_soccp_state(struct hw_fence_soccp *soccp_props) +{ + bool intended_state; + int ret; + + intended_state = (refcount_read(&soccp_props->usage_cnt) > 1); + + if (intended_state == soccp_props->is_awake) + return 0; + + /* cannot call soccp power vote because soccp has crashed */ + if (IS_ERR_OR_NULL(soccp_props->rproc)) { + HWFNC_DBG_SSR("Cannot set power vote before after_powerup notification\n"); + return -EINVAL; + } + + ret = rproc_set_state(soccp_props->rproc, intended_state); + if (!ret) + soccp_props->is_awake = intended_state; + + return ret; +} + +int hw_fence_utils_set_power_vote(struct hw_fence_driver_data *drv_data, bool state) +{ + struct hw_fence_soccp *soccp_props; + bool prev_state, cur_state; + int ret; + + if (!drv_data || !drv_data->has_soccp) { + HWFNC_ERR("invalid params: drv_data:0x%pK has_soccp:%d state:%d\n", drv_data, + drv_data ? drv_data->has_soccp : -1, state); + return -EINVAL; + } + + soccp_props = &drv_data->soccp_props; + mutex_lock(&soccp_props->rproc_lock); + if (state) { + refcount_inc(&soccp_props->usage_cnt); + } else { + if (refcount_read(&soccp_props->usage_cnt) == 1) { + mutex_unlock(&soccp_props->rproc_lock); + HWFNC_ERR("removing usage cnt that was never set\n"); + + return -EINVAL; + } + refcount_dec(&soccp_props->usage_cnt); + } + + prev_state = soccp_props->is_awake; + ret = _set_intended_soccp_state(soccp_props); + cur_state = soccp_props->is_awake; + + mutex_unlock(&soccp_props->rproc_lock); + + HWFNC_DBG_L("Set power vote prev:%d curr:%d req_state:%d votes:0x%x ret:%d\n", + prev_state, cur_state, state, refcount_read(&soccp_props->usage_cnt), ret); + + return 0; /* do not expose failures of power vote to client */ +} +#else +int hw_fence_utils_set_power_vote(struct hw_fence_driver_data *drv_data, bool state) +{ + HWFNC_ERR("Kernel version does not support SOCCP power votes\n"); + return -EINVAL; +} +#endif + +static int _set_soccp_rproc(struct hw_fence_soccp *soccp_props, phandle ph) +{ + int ret = 0; + + mutex_lock(&soccp_props->rproc_lock); + if (IS_ERR_OR_NULL(soccp_props->rproc)) + soccp_props->rproc = rproc_get_by_phandle(ph); + if (IS_ERR_OR_NULL(soccp_props->rproc)) { + ret = PTR_ERR(soccp_props->rproc); + if (!ret) + ret = -EINVAL; + soccp_props->rproc = NULL; + } + mutex_unlock(&soccp_props->rproc_lock); + + return ret; +} + +static int hw_fence_notify_ssr(struct notifier_block *nb, unsigned long action, void *data) +{ + struct hw_fence_soccp *soccp_props = container_of(nb, struct hw_fence_soccp, ssr_nb); + struct hw_fence_driver_data *drv_data = container_of(soccp_props, + struct hw_fence_driver_data, soccp_props); + struct qcom_ssr_notify_data *notify_data = data; + u32 payload_type; + int ret = 0; + + switch (action) { + case QCOM_SSR_BEFORE_POWERUP: + HWFNC_DBG_SSR("received soccp starting event\n"); + break; + case QCOM_SSR_AFTER_POWERUP: + HWFNC_DBG_SSR("received soccp running event\n"); + /* rproc must be available after power up notification */ + ret = _set_soccp_rproc(soccp_props, soccp_props->rproc_ph); + if (ret) + HWFNC_ERR("failed getting soccp_rproc:0x%pK ph:%d usage_cnt:0x%x ret:%d\n", + soccp_props->rproc, soccp_props->rproc_ph, + refcount_read(&soccp_props->usage_cnt), ret); + /* inform soccp of ctrl queue updates once it is up; this will set a power vote */ + payload_type = (soccp_props->ssr_cnt) ? 
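/* a non-zero ssr_cnt means this powerup follows an SSR, so TYPE_4 is sent instead of the first-boot TYPE_3 */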
HW_FENCE_PAYLOAD_TYPE_4 : + HW_FENCE_PAYLOAD_TYPE_3; + ret = _send_bootup_ctrl_txq_msg(drv_data, payload_type); + if (ret) { + HWFNC_ERR("failed to send ctrlq message for bootup event\n"); + goto end; + } + break; + case QCOM_SSR_BEFORE_SHUTDOWN: + HWFNC_DBG_SSR("received soccp %s event ssr_cnt:%d\n", notify_data->crashed ? + "crashed" : "stopping", soccp_props->ssr_cnt); + /* disallow fence creation, signaling, etc. when soccp is going to stop or crash */ + drv_data->fctl_ready = false; + soccp_props->ssr_cnt++; + break; + case QCOM_SSR_AFTER_SHUTDOWN: + HWFNC_DBG_SSR("received soccp offline event\n"); + mutex_lock(&soccp_props->rproc_lock); + if (!IS_ERR_OR_NULL(soccp_props->rproc)) + rproc_put(soccp_props->rproc); + soccp_props->rproc = NULL; + soccp_props->is_awake = false; + mutex_unlock(&soccp_props->rproc_lock); + ret = hw_fence_ssr_cleanup_table(drv_data, drv_data->hw_fences_tbl, + drv_data->hw_fence_table_entries, HW_FENCE_FCTL_LOCK_VALUE); + if (ret) + HWFNC_ERR("failed to cleanup hw-fence table for soccp ssr\n"); + break; + default: + HWFNC_ERR("received unrecognized event %lu\n", action); + break; + } + +end: + return ret ? NOTIFY_BAD : NOTIFY_OK; +} + +int hw_fence_utils_register_soccp_ssr_notifier(struct hw_fence_driver_data *drv_data) +{ + void *notifier; + struct hw_fence_soccp *soccp_props; + int ret; + + if (!drv_data || !drv_data->has_soccp) { + HWFNC_ERR("invalid drv_data:0x%pK has_soccp:%d\n", drv_data, + drv_data ? drv_data->has_soccp : -1); + return -EINVAL; + } + soccp_props = &drv_data->soccp_props; + + mutex_init(&soccp_props->rproc_lock); + refcount_set(&soccp_props->usage_cnt, 1); + init_waitqueue_head(&soccp_props->ssr_wait_queue); + soccp_props->ssr_nb.priority = 1; /* higher value indicates higher priority */ + soccp_props->ssr_nb.notifier_call = hw_fence_notify_ssr; + notifier = qcom_register_ssr_notifier("soccp", &soccp_props->ssr_nb); + if (IS_ERR(notifier)) { + HWFNC_ERR("failed to register soccp ssr notifier\n"); + return PTR_ERR(notifier); + } + soccp_props->ssr_notifier = notifier; + HWFNC_DBG_SSR("registered for soccp ssr notification notifier:0x%pK\n", notifier); + + /* if soccp is already up, do initial bootup here; this first attempt may fail */ + ret = _set_soccp_rproc(soccp_props, soccp_props->rproc_ph); + if (ret) { + HWFNC_DBG_INFO("failed getting soccp_rproc:0x%pK ph:%d at probe time ret:%d\n", + soccp_props->rproc, soccp_props->rproc_ph, ret); + return 0; + } + + ret = _send_bootup_ctrl_txq_msg(drv_data, HW_FENCE_PAYLOAD_TYPE_3); + if (ret) + HWFNC_DBG_INFO("can't send ctrl tx queue msg to inform soccp of mem map\n"); + + return 0; +} + +static int hw_fence_gunyah_share_mem(struct hw_fence_driver_data *drv_data, + gh_vmid_t self, gh_vmid_t peer) +{ + struct qcom_scm_vmperm src_vmlist[] = {{self, PERM_READ | PERM_WRITE | PERM_EXEC}}; + struct qcom_scm_vmperm dst_vmlist[] = {{self, PERM_READ | PERM_WRITE}, + {peer, PERM_READ | PERM_WRITE}}; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)) + u64 srcvmids, dstvmids; +#else + unsigned int srcvmids, dstvmids; +#endif + struct gh_acl_desc *acl; + struct gh_sgl_desc *sgl; + int ret; + + srcvmids = BIT(src_vmlist[0].vmid); + dstvmids = BIT(dst_vmlist[0].vmid) | BIT(dst_vmlist[1].vmid); + ret = qcom_scm_assign_mem(drv_data->res.start, resource_size(&drv_data->res), &srcvmids, + dst_vmlist, ARRAY_SIZE(dst_vmlist)); + if (ret) { + HWFNC_ERR("%s: qcom_scm_assign_mem failed addr=0x%llx size=%lu err=%d\n", + __func__, drv_data->res.start, drv_data->size, ret); + return ret; + } + + acl = 
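/* two ACL entries (self and peer, both R/W) and one SGL entry covering the whole carved-out region */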
kzalloc(offsetof(struct gh_acl_desc, acl_entries[2]), GFP_KERNEL); + if (!acl) + return -ENOMEM; + sgl = kzalloc(offsetof(struct gh_sgl_desc, sgl_entries[1]), GFP_KERNEL); + if (!sgl) { + kfree(acl); + return -ENOMEM; + } + acl->n_acl_entries = 2; + acl->acl_entries[0].vmid = (u16)self; + acl->acl_entries[0].perms = GH_RM_ACL_R | GH_RM_ACL_W; + acl->acl_entries[1].vmid = (u16)peer; + acl->acl_entries[1].perms = GH_RM_ACL_R | GH_RM_ACL_W; + + sgl->n_sgl_entries = 1; + sgl->sgl_entries[0].ipa_base = drv_data->res.start; + sgl->sgl_entries[0].size = resource_size(&drv_data->res); + +#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) + ret = ghd_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, 0, drv_data->label, + acl, sgl, NULL, &drv_data->memparcel); +#else + ret = gh_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, 0, drv_data->label, + acl, sgl, NULL, &drv_data->memparcel); +#endif + if (ret) { + HWFNC_ERR("%s: gh_rm_mem_share failed addr=%llx size=%lu err=%d\n", + __func__, drv_data->res.start, drv_data->size, ret); + /* Attempt to give resource back to HLOS */ + qcom_scm_assign_mem(drv_data->res.start, resource_size(&drv_data->res), &dstvmids, + src_vmlist, ARRAY_SIZE(src_vmlist)); + ret = -EPROBE_DEFER; + } + + kfree(acl); + kfree(sgl); + + return ret; +} + +static int _is_mem_shared(struct resource *res) +{ +#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) + return gh_cpusys_vm_get_share_mem_info(res); +#else + return -EINVAL; +#endif +} + +static int hw_fence_rm_cb(struct notifier_block *nb, unsigned long cmd, void *data) +{ + struct gh_rm_notif_vm_status_payload *vm_status_payload; + struct hw_fence_driver_data *drv_data; + struct resource res; + gh_vmid_t peer_vmid; + gh_vmid_t self_vmid; + int ret; + + drv_data = container_of(nb, struct hw_fence_driver_data, rm_nb); + + HWFNC_DBG_INIT("cmd:0x%lx ++\n", cmd); + if (cmd != GH_RM_NOTIF_VM_STATUS) + goto end; + + vm_status_payload = data; + HWFNC_DBG_INIT("payload vm_status:%d\n", vm_status_payload->vm_status); + if (vm_status_payload->vm_status != GH_RM_VM_STATUS_READY && + vm_status_payload->vm_status != GH_RM_VM_STATUS_RESET) + goto end; + +#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) + if (ghd_rm_get_vmid(drv_data->peer_name, &peer_vmid)) + goto end; + + if (ghd_rm_get_vmid(GH_PRIMARY_VM, &self_vmid)) + goto end; +#else + if (gh_rm_get_vmid(drv_data->peer_name, &peer_vmid)) + goto end; + + if (gh_rm_get_vmid(GH_PRIMARY_VM, &self_vmid)) + goto end; +#endif + + if (peer_vmid != vm_status_payload->vmid) + goto end; + + switch (vm_status_payload->vm_status) { + case GH_RM_VM_STATUS_READY: + ret = _is_mem_shared(&res); + if (ret) { + HWFNC_DBG_INIT("mem not shared ret:%d, attempt share\n", ret); + if (hw_fence_gunyah_share_mem(drv_data, self_vmid, peer_vmid)) + HWFNC_ERR("failed to share memory\n"); + else + drv_data->fctl_ready = true; + } else { + if (drv_data->res.start == res.start && + resource_size(&drv_data->res) == resource_size(&res)) { + drv_data->fctl_ready = true; + HWFNC_DBG_INIT("mem_ready: add:0x%llx size:%llu ret:%d\n", + res.start, resource_size(&res), ret); + } else { + HWFNC_ERR("mem-shared:[0x%llx,%llu] expected:[0x%llx,%llu]\n", + res.start, resource_size(&res), drv_data->res.start, + resource_size(&drv_data->res)); + } + } + break; + case GH_RM_VM_STATUS_RESET: + HWFNC_DBG_INIT("reset\n"); + break; + } + +end: + return NOTIFY_DONE; +} + +static int _register_vm_mem_with_hyp(struct hw_fence_driver_data *drv_data, + struct device_node *node_compat) +{ + int ret, notifier_ret; + + if (!drv_data || !node_compat) { + 
HWFNC_ERR("invalid params drv_data:0x%pK node_compat:0x%pK\n", drv_data, + node_compat); + return -EINVAL; + } + + ret = of_property_read_u32(node_compat, "gunyah-label", &drv_data->label); + if (ret) { + HWFNC_ERR("failed to find label info %d\n", ret); + return ret; + } + + /* Register memory with HYP for vm */ + ret = of_property_read_u32(node_compat, "peer-name", &drv_data->peer_name); + if (ret) + drv_data->peer_name = GH_SELF_VM; + + drv_data->rm_nb.notifier_call = hw_fence_rm_cb; + drv_data->rm_nb.priority = INT_MAX; + notifier_ret = gh_rm_register_notifier(&drv_data->rm_nb); + HWFNC_DBG_INIT("notifier: ret:%d peer_name:%d notifier_ret:%d\n", ret, + drv_data->peer_name, notifier_ret); + if (notifier_ret) { + HWFNC_ERR_ONCE("fail to register notifier ret:%d\n", ret); + return -EPROBE_DEFER; + } + + return 0; +} + +static int _init_soccp_mem(struct hw_fence_driver_data *drv_data) +{ + struct iommu_domain *domain; + u32 shbuf_soccp_va; + int ret; + + if (!drv_data) { + HWFNC_ERR("invalid params drv_data:0x%pK\n", drv_data); + return -EINVAL; + } + + ret = of_property_read_u32(drv_data->dev->of_node, "shbuf_soccp_va", &shbuf_soccp_va); + if (ret || !shbuf_soccp_va) { + if (drv_data->cpu_addr_cookie) { + HWFNC_ERR("non-static mem allocation w/out soccp_va dt ret:%d val:%d\n", + ret, shbuf_soccp_va); + return -EINVAL; + } + /* use one to one memory mapping if virtual address is not in dt */ + shbuf_soccp_va = drv_data->res.start; + } + + domain = iommu_get_domain_for_dev(drv_data->dev); + if (IS_ERR_OR_NULL(domain)) { + HWFNC_ERR("failed to get iommu domain for device ret:%ld\n", PTR_ERR(domain)); + return PTR_ERR(domain); + } + +#if (KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE) + ret = iommu_map(domain, shbuf_soccp_va, drv_data->res.start, drv_data->size, + IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, GFP_KERNEL); +#else + ret = iommu_map(domain, shbuf_soccp_va, drv_data->res.start, drv_data->size, + IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE); +#endif + if (ret) + HWFNC_ERR("failed to map for soccp smmu phys_addr:0x%llx va:0x%x sz:%lx ret:%d\n", + drv_data->res.start, shbuf_soccp_va, drv_data->size, ret); + else + HWFNC_DBG_INIT("mapped for soccp smmu phys_addr:0x%llx va:0x%x sz:%lx ret:%d\n", + drv_data->res.start, shbuf_soccp_va, drv_data->size, ret); + + return ret; +} + +/* Allocates carved-out mapped memory from device-tree */ +static int _alloc_mem_static(struct hw_fence_driver_data *drv_data, struct device_node *node_compat) +{ + struct device_node *np; + int ret; + + if (!drv_data || !node_compat) { + HWFNC_ERR("invalid drv_data:0x%pK node_compat:0x%pK\n", drv_data, node_compat); + return -EINVAL; + } + + np = of_parse_phandle(node_compat, "shared-buffer", 0); + if (!np) { + HWFNC_ERR("failed to read shared-buffer info\n"); + return -ENOMEM; + } + + ret = of_address_to_resource(np, 0, &drv_data->res); + of_node_put(np); + if (ret) { + HWFNC_ERR("of_address_to_resource failed %d\n", ret); + return -EINVAL; + } + + return 0; +} + +/* Allocates memory dynamically */ +static int _alloc_mem_dynamic(struct hw_fence_driver_data *drv_data) +{ + u32 events_size, size; + + if (!drv_data || !drv_data->has_soccp) { + HWFNC_ERR("invalid drv_data:0x%pK has_soccp:%d\n", drv_data, + drv_data ? 
drv_data->has_soccp : -1); + return -EINVAL; + } + + events_size = HW_FENCE_MAX_EVENTS * sizeof(struct msm_hw_fence_event); + if (drv_data->used_mem_size >= U32_MAX - events_size) { + HWFNC_ERR("invalid used_mem_size:%u events_size:%u\n", drv_data->used_mem_size, + events_size); + return -EINVAL; + } + + size = PAGE_ALIGN(drv_data->used_mem_size + events_size); + drv_data->cpu_addr_cookie = dma_alloc_attrs(drv_data->dev, size, &drv_data->res.start, + GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING); + if (!drv_data->cpu_addr_cookie) { + HWFNC_ERR("memory allocation failed!\n"); + return -ENOMEM; + } + + drv_data->res.end = drv_data->res.start + size - 1; + drv_data->res.name = "hwfence_shbuf"; + HWFNC_DBG_INIT("allocated memory start:0x%llx end:0x%llx size:0x%x\n", drv_data->res.start, + drv_data->res.end, size); + + return 0; +} + +/* Allocates carved-out mapped memory */ +int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *drv_data) +{ + struct device_node *node = drv_data->dev->of_node; + struct device_node *node_compat; + const char *compat = "qcom,msm-hw-fence-mem"; + int ret; + + node_compat = of_find_compatible_node(node, NULL, compat); + if (!node_compat && !drv_data->has_soccp) { + HWFNC_ERR("Failed to find dev node with compat:%s\n", compat); + return -EINVAL; + } + + if (node_compat) + ret = _alloc_mem_static(drv_data, node_compat); + else + ret = _alloc_mem_dynamic(drv_data); + + if (ret) { + HWFNC_ERR("failed to allocate static or dynamic memory ret:%d\n", ret); + return ret; + } + + if (drv_data->has_soccp) + drv_data->io_mem_base = memremap(drv_data->res.start, resource_size(&drv_data->res), + MEMREMAP_WB); + else + drv_data->io_mem_base = devm_ioremap_wc(drv_data->dev, drv_data->res.start, + resource_size(&drv_data->res)); + + if (!drv_data->io_mem_base) { + HWFNC_ERR("ioremap failed!\n"); + return -ENXIO; + } + drv_data->size = resource_size(&drv_data->res); + if (drv_data->size < drv_data->used_mem_size) { + HWFNC_ERR("0x%lx size of carved-out memory region less than required size:0x%x\n", + drv_data->size, drv_data->used_mem_size); + return -ENOMEM; + } + + memset_io(drv_data->io_mem_base, 0x0, drv_data->size); + + HWFNC_DBG_INIT("va:0x%pK start:0x%llx sz:0x%lx name:%s cookie:0x%pK has_soccp:%s\n", + drv_data->io_mem_base, drv_data->res.start, drv_data->size, drv_data->res.name, + drv_data->cpu_addr_cookie, drv_data->has_soccp ? "true" : "false"); + + if (drv_data->has_soccp) + ret = _init_soccp_mem(drv_data); + else + ret = _register_vm_mem_with_hyp(drv_data, node_compat); + + if (ret) + HWFNC_ERR("failed to share mem with %s cpu_va:0x%pK pa:0x%llx sz:0x%lx name:%s\n", + drv_data->has_soccp ? 
"soccp" : "vm", drv_data->io_mem_base, + drv_data->res.start, drv_data->size, drv_data->res.name); + + return ret; +} + +char *_get_mem_reserve_type(enum hw_fence_mem_reserve type) +{ + switch (type) { + case HW_FENCE_MEM_RESERVE_CTRL_QUEUE: + return "HW_FENCE_MEM_RESERVE_CTRL_QUEUE"; + case HW_FENCE_MEM_RESERVE_LOCKS_REGION: + return "HW_FENCE_MEM_RESERVE_LOCKS_REGION"; + case HW_FENCE_MEM_RESERVE_TABLE: + return "HW_FENCE_MEM_RESERVE_TABLE"; + case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: + return "HW_FENCE_MEM_RESERVE_CLIENT_QUEUE"; + case HW_FENCE_MEM_RESERVE_EVENTS_BUFF: + return "HW_FENCE_MEM_RESERVE_EVENTS_BUFF"; + } + + return "Unknown"; +} + +/* Calculates the memory range for each of the elements in the carved-out memory */ +int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data, + enum hw_fence_mem_reserve type, phys_addr_t *phys, void **pa, u32 *size, int client_id) +{ + int ret = 0; + u32 start_offset = 0; + u32 remaining_size_bytes; + u32 total_events; + + switch (type) { + case HW_FENCE_MEM_RESERVE_CTRL_QUEUE: + start_offset = 0; + *size = drv_data->hw_fence_mem_ctrl_queues_size; + break; + case HW_FENCE_MEM_RESERVE_LOCKS_REGION: + /* Locks region starts at the end of the ctrl queues */ + start_offset = drv_data->hw_fence_mem_ctrl_queues_size; + *size = HW_FENCE_MEM_LOCKS_SIZE(drv_data->rxq_clients_num); + break; + case HW_FENCE_MEM_RESERVE_TABLE: + /* HW Fence table starts at the end of the Locks region */ + start_offset = drv_data->hw_fence_mem_ctrl_queues_size + + HW_FENCE_MEM_LOCKS_SIZE(drv_data->rxq_clients_num); + *size = drv_data->hw_fence_mem_fences_table_size; + break; + case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: + if (client_id >= drv_data->clients_num || + !drv_data->hw_fence_client_queue_size[client_id].type) { + HWFNC_ERR("unexpected client_id:%d for clients_num:%d\n", client_id, + drv_data->clients_num); + ret = -EINVAL; + goto exit; + } + + start_offset = drv_data->hw_fence_client_queue_size[client_id].start_offset; + *size = drv_data->hw_fence_client_queue_size[client_id].type->mem_size; + break; + case HW_FENCE_MEM_RESERVE_EVENTS_BUFF: + start_offset = drv_data->used_mem_size; + remaining_size_bytes = drv_data->size - start_offset; + if (start_offset >= drv_data->size || + remaining_size_bytes < sizeof(struct msm_hw_fence_event)) { + HWFNC_DBG_INFO("no space for events total_sz:%lu offset:%u evt_sz:%lu\n", + drv_data->size, start_offset, sizeof(struct msm_hw_fence_event)); + ret = -ENOMEM; + goto exit; + } + + total_events = remaining_size_bytes / sizeof(struct msm_hw_fence_event); + if (total_events > HW_FENCE_MAX_EVENTS) + total_events = HW_FENCE_MAX_EVENTS; + *size = total_events * sizeof(struct msm_hw_fence_event); + break; + default: + HWFNC_ERR("Invalid mem reserve type:%d\n", type); + ret = -EINVAL; + break; + } + + if (start_offset + *size > drv_data->size) { + HWFNC_ERR("reservation request exceeds total size:%lu\n", + drv_data->size); + return -ENOMEM; + } + + HWFNC_DBG_INIT("type:%s (%d) start:0x%llx start_offset:%u size:0x%x\n", + _get_mem_reserve_type(type), type, drv_data->res.start, + start_offset, *size); + + + *phys = drv_data->res.start + (phys_addr_t)start_offset; + *pa = (drv_data->io_mem_base + start_offset); /* offset is in bytes */ + HWFNC_DBG_H("phys:0x%llx pa:0x%pK\n", *phys, *pa); + +exit: + return ret; +} + +static int _parse_client_queue_dt_props_extra(struct hw_fence_driver_data *drv_data, + struct hw_fence_client_type_desc *desc) +{ + u32 max_idx_from_zero, payload_size_u32 = HW_FENCE_CLIENT_QUEUE_PAYLOAD / sizeof(u32); + 
char name[DT_PROPS_CLIENT_EXTRA_PROPS_SIZE]; + u32 tmp[5]; + bool idx_by_payload = false; + int count, ret; + + snprintf(name, sizeof(name), "qcom,hw-fence-client-type-%s-extra", desc->name); + + /* check if property is present */ + ret = of_property_read_bool(drv_data->dev->of_node, name); + if (!ret) + return 0; + + count = of_property_count_u32_elems(drv_data->dev->of_node, name); + if (count <= 0 || count > 5) { + HWFNC_ERR("invalid %s extra dt props count:%d\n", desc->name, count); + return -EINVAL; + } + + ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, count); + if (ret) { + HWFNC_ERR("Failed to read %s extra dt properties ret=%d count=%d\n", desc->name, + ret, count); + ret = -EINVAL; + goto exit; + } + + desc->start_padding = tmp[0]; + if (count >= 2) + desc->end_padding = tmp[1]; + if (count >= 3) + desc->txq_idx_start = tmp[2]; + if (count >= 4) { + if (tmp[3] > 1) { + HWFNC_ERR("%s invalid txq_idx_by_payload prop:%u\n", desc->name, tmp[3]); + ret = -EINVAL; + goto exit; + } + idx_by_payload = tmp[3]; + desc->txq_idx_factor = idx_by_payload ? payload_size_u32 : 1; + } + if (count >= 5) { + if (tmp[4] > 1) { + HWFNC_ERR("%s invalid skip_fctl_ref prop:%u\n", desc->name, tmp[4]); + ret = -EINVAL; + goto exit; + } + desc->skip_fctl_ref = 1; + } + + if (desc->start_padding % sizeof(u32) || desc->end_padding % sizeof(u32) || + (desc->start_padding + desc->end_padding) % sizeof(u64)) { + HWFNC_ERR("%s start_padding:%u end_padding:%u violates mem alignment\n", + desc->name, desc->start_padding, desc->end_padding); + ret = -EINVAL; + goto exit; + } + + if (desc->start_padding >= U32_MAX - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num, + drv_data->has_soccp)) { + HWFNC_ERR("%s client queues_num:%u start_padding:%u will overflow mem_size\n", + desc->name, desc->queues_num, desc->start_padding); + ret = -EINVAL; + goto exit; + } + + if (desc->end_padding >= U32_MAX - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num, + drv_data->has_soccp) - desc->start_padding) { + HWFNC_ERR("%s client q_num:%u start_p:%u end_p:%u will overflow mem_size\n", + desc->name, desc->queues_num, desc->start_padding, desc->end_padding); + ret = -EINVAL; + goto exit; + } + + max_idx_from_zero = idx_by_payload ? desc->queue_entries : + desc->queue_entries * payload_size_u32; + if (desc->txq_idx_start >= U32_MAX - max_idx_from_zero) { + HWFNC_ERR("%s txq_idx start:%u by_payload:%s q_entries:%u will overflow txq_idx\n", + desc->name, desc->txq_idx_start, idx_by_payload ? "true" : "false", + desc->queue_entries); + ret = -EINVAL; + goto exit; + } + + HWFNC_DBG_INIT("%s: start_p=%u end_p=%u txq_idx_start:%u idx_by_payload:%s skip_ref:%s\n", + desc->name, desc->start_padding, desc->end_padding, desc->txq_idx_start, + idx_by_payload ? "true" : "false", desc->skip_fctl_ref ? 
"true" : "false"); + +exit: + return ret; +} + +static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_data, + struct hw_fence_client_type_desc *desc) +{ + char name[DT_PROPS_CLIENT_PROPS_SIZE]; + u32 tmp[4]; + u32 queue_size; + int ret; + + /* parse client queue properties from device-tree */ + snprintf(name, sizeof(name), "qcom,hw-fence-client-type-%s", desc->name); + ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, 4); + if (ret) { + HWFNC_DBG_INIT("missing %s client queue entry or invalid ret:%d\n", desc->name, + ret); + desc->queue_entries = drv_data->hw_fence_queue_entries; + } else { + desc->clients_num = tmp[0]; + desc->queues_num = tmp[1]; + desc->queue_entries = tmp[2]; + + if (tmp[3] > 1) { + HWFNC_ERR("%s invalid skip_txq_wr_idx prop:%u\n", desc->name, tmp[3]); + return -EINVAL; + } + desc->skip_txq_wr_idx = tmp[3]; + } + + if (desc->clients_num > desc->max_clients_num || !desc->queues_num || + desc->queues_num > HW_FENCE_CLIENT_QUEUES || !desc->queue_entries) { + HWFNC_ERR("%s invalid dt: clients_num:%u queues_num:%u, queue_entries:%u\n", + desc->name, desc->clients_num, desc->queues_num, desc->queue_entries); + return -EINVAL; + } + + /* parse extra client queue properties from device-tree */ + ret = _parse_client_queue_dt_props_extra(drv_data, desc); + if (ret) { + HWFNC_ERR("%s failed to parse extra dt props\n", desc->name); + return -EINVAL; + } + + /* compute mem_size */ + if (desc->queue_entries >= U32_MAX / HW_FENCE_CLIENT_QUEUE_PAYLOAD) { + HWFNC_ERR("%s client queue entries:%u will overflow client queue size\n", + desc->name, desc->queue_entries); + return -EINVAL; + } + + queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * desc->queue_entries; + if (queue_size >= ((U32_MAX & PAGE_MASK) - + (HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num, drv_data->has_soccp) + + desc->start_padding + desc->end_padding)) / desc->queues_num) { + HWFNC_ERR("%s client queue_sz:%u start_p:%u end_p:%u will overflow mem size\n", + desc->name, queue_size, desc->start_padding, desc->end_padding); + return -EINVAL; + } + + desc->mem_size = PAGE_ALIGN(HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num, + drv_data->has_soccp) + (queue_size * desc->queues_num) + desc->start_padding + + desc->end_padding); + + if (desc->mem_size > MAX_CLIENT_QUEUE_MEM_SIZE) { + HWFNC_ERR("%s client queue mem_size:%u greater than max mem size:%d\n", + desc->name, desc->mem_size, MAX_CLIENT_QUEUE_MEM_SIZE); + return -EINVAL; + } + + HWFNC_DBG_INIT("%s: clients=%u q_num=%u q_entries=%u mem_sz=%u skips_wr_ptr:%s\n", + desc->name, desc->clients_num, desc->queues_num, desc->queue_entries, + desc->mem_size, desc->skip_txq_wr_idx ? 
"true" : "false"); + + return 0; +} + +static int _parse_client_queue_dt_props(struct hw_fence_driver_data *drv_data) +{ + struct hw_fence_client_type_desc *desc; + int i, j, ret; + u32 start_offset; + size_t size; + int configurable_clients_num = 0; + + drv_data->rxq_clients_num = HW_FENCE_MIN_RXQ_CLIENTS; + for (i = 0; i < HW_FENCE_MAX_CLIENT_TYPE; i++) { + desc = &hw_fence_client_types[i]; + ret = _parse_client_queue_dt_props_indv(drv_data, desc); + if (ret) { + HWFNC_ERR("failed to initialize %s client queue size properties\n", + desc->name); + return ret; + } + + if (i >= HW_FENCE_MIN_RXQ_CLIENT_TYPE && + desc->queues_num == HW_FENCE_CLIENT_QUEUES) + drv_data->rxq_clients_num += desc->clients_num; + + if (i >= HW_FENCE_MAX_CLIENT_TYPE_STATIC) + configurable_clients_num += desc->clients_num; + } + + /* store client type descriptors for configurable client indexing logic */ + drv_data->hw_fence_client_types = hw_fence_client_types; + + /* clients and size desc are allocated for all static clients regardless of device-tree */ + drv_data->clients_num = HW_FENCE_MAX_STATIC_CLIENTS_INDEX + configurable_clients_num; + + /* allocate memory for client queue size descriptors */ + size = drv_data->clients_num * sizeof(struct hw_fence_client_queue_desc); + drv_data->hw_fence_client_queue_size = kzalloc(size, GFP_KERNEL); + if (!drv_data->hw_fence_client_queue_size) + return -ENOMEM; + + /* initialize client queue size desc for each client */ + start_offset = PAGE_ALIGN(drv_data->hw_fence_mem_ctrl_queues_size + + HW_FENCE_MEM_LOCKS_SIZE(drv_data->rxq_clients_num) + + drv_data->hw_fence_mem_fences_table_size); + for (i = 0; i < HW_FENCE_MAX_CLIENT_TYPE; i++) { + desc = &hw_fence_client_types[i]; + for (j = 0; j < desc->clients_num; j++) { + enum hw_fence_client_id client_id_ext = desc->init_id + j; + enum hw_fence_client_id client_id = + hw_fence_utils_get_client_id_priv(drv_data, client_id_ext); + + drv_data->hw_fence_client_queue_size[client_id] = + (struct hw_fence_client_queue_desc){desc, start_offset}; + HWFNC_DBG_INIT("%s client_id_ext:%u client_id:%u start_offset:%u\n", + desc->name, client_id_ext, client_id, start_offset); + start_offset += desc->mem_size; + } + } + drv_data->used_mem_size = start_offset; + + return 0; +} + +int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data) +{ + int ret; + size_t size; + u32 val = 0; + struct hw_fence_soccp *soccp_props = &drv_data->soccp_props; + + /* check presence of soccp */ + ret = of_property_read_u32(drv_data->dev->of_node, "soccp_controller", + &soccp_props->rproc_ph); + if (!ret && soccp_props->rproc_ph) + drv_data->has_soccp = true; + + ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-table-entries", &val); + if (ret || !val) { + HWFNC_ERR("missing hw fences table entry or invalid ret:%d val:%d\n", ret, val); + return ret; + } + drv_data->hw_fence_table_entries = val; + + if (drv_data->hw_fence_table_entries >= U32_MAX / sizeof(struct msm_hw_fence)) { + HWFNC_ERR("table entries:%u will overflow table size\n", + drv_data->hw_fence_table_entries); + return -EINVAL; + } + drv_data->hw_fence_mem_fences_table_size = (sizeof(struct msm_hw_fence) * + drv_data->hw_fence_table_entries); + + ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-queue-entries", &val); + if (ret || !val) { + HWFNC_ERR("missing queue entries table entry or invalid ret:%d val:%d\n", ret, val); + return ret; + } + drv_data->hw_fence_queue_entries = val; + + /* ctrl queues init */ + + if (drv_data->hw_fence_queue_entries >= 
U32_MAX / HW_FENCE_CTRL_QUEUE_PAYLOAD) { + HWFNC_ERR("queue entries:%u will overflow ctrl queue size\n", + drv_data->hw_fence_queue_entries); + return -EINVAL; + } + drv_data->hw_fence_ctrl_queue_size = HW_FENCE_CTRL_QUEUE_PAYLOAD * + HW_FENCE_CTRL_QUEUE_ENTRIES; + + if (drv_data->hw_fence_ctrl_queue_size >= (U32_MAX - + HW_FENCE_HFI_CTRL_HEADERS_SIZE(drv_data->has_soccp)) / + HW_FENCE_CTRL_QUEUES) { + HWFNC_ERR("queue size:%u will overflow ctrl queue mem size\n", + drv_data->hw_fence_ctrl_queue_size); + return -EINVAL; + } + drv_data->hw_fence_mem_ctrl_queues_size = + HW_FENCE_HFI_CTRL_HEADERS_SIZE(drv_data->has_soccp) + + (HW_FENCE_CTRL_QUEUES * drv_data->hw_fence_ctrl_queue_size); + + /* clients queues init */ + + ret = _parse_client_queue_dt_props(drv_data); + if (ret) { + HWFNC_ERR("failed to parse client queue properties\n"); + return -EINVAL; + } + + /* allocate clients */ + + size = drv_data->clients_num * sizeof(struct msm_hw_fence_client *); + drv_data->clients = kzalloc(size, GFP_KERNEL); + if (!drv_data->clients) + return -ENOMEM; + + HWFNC_DBG_INIT("table: entries=%u mem_size=%u queue: entries=%u\b", + drv_data->hw_fence_table_entries, drv_data->hw_fence_mem_fences_table_size, + drv_data->hw_fence_queue_entries); + HWFNC_DBG_INIT("ctrl queue: size=%u mem_size=%u\b", + drv_data->hw_fence_ctrl_queue_size, drv_data->hw_fence_mem_ctrl_queues_size); + HWFNC_DBG_INIT("clients_num: %u, total_mem_size:%u\n", drv_data->clients_num, + drv_data->used_mem_size); + HWFNC_DBG_INIT("has_soccp:%s\n", drv_data->has_soccp ? "true" : "false"); + + return 0; +} + +int hw_fence_utils_map_ipcc(struct hw_fence_driver_data *drv_data) +{ + int ret; + u32 reg_config[2]; + void __iomem *ptr; + + /* Get ipcc memory range */ + ret = of_property_read_u32_array(drv_data->dev->of_node, "qcom,ipcc-reg", + reg_config, 2); + if (ret) { + HWFNC_ERR("failed to read ipcc reg: %d\n", ret); + return ret; + } + drv_data->ipcc_reg_base = reg_config[0]; + drv_data->ipcc_size = reg_config[1]; + + /* Mmap ipcc registers */ + ptr = devm_ioremap(drv_data->dev, drv_data->ipcc_reg_base, drv_data->ipcc_size); + if (!ptr) { + HWFNC_ERR("failed to ioremap ipcc regs\n"); + return -ENOMEM; + } + drv_data->ipcc_io_mem = ptr; + + HWFNC_DBG_H("mapped address:0x%llx size:0x%x io_mem:0x%pK\n", + drv_data->ipcc_reg_base, drv_data->ipcc_size, + drv_data->ipcc_io_mem); + + hw_fence_ipcc_enable_signaling(drv_data); + + return ret; +} + +int hw_fence_utils_map_qtime(struct hw_fence_driver_data *drv_data) +{ + int ret = 0; + unsigned int reg_config[2]; + void __iomem *ptr; + + ret = of_property_read_u32_array(drv_data->dev->of_node, "qcom,qtime-reg", + reg_config, 2); + if (ret) { + HWFNC_ERR("failed to read qtimer reg: %d\n", ret); + return ret; + } + + drv_data->qtime_reg_base = reg_config[0]; + drv_data->qtime_size = reg_config[1]; + + ptr = devm_ioremap(drv_data->dev, drv_data->qtime_reg_base, drv_data->qtime_size); + if (!ptr) { + HWFNC_ERR("failed to ioremap qtime regs\n"); + return -ENOMEM; + } + + drv_data->qtime_io_mem = ptr; + + return ret; +} + +enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver_data *drv_data, + enum hw_fence_client_id client_id) +{ + int i, client_type, offset; + enum hw_fence_client_id client_id_priv; + + if (client_id < HW_FENCE_MAX_STATIC_CLIENTS_INDEX) + return client_id; + + /* consolidate external 'hw_fence_client_id' enum into consecutive internal client IDs */ + client_type = HW_FENCE_MAX_CLIENT_TYPE_STATIC + + (client_id - HW_FENCE_MAX_STATIC_CLIENTS_INDEX) / + 
MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT; + offset = (client_id - HW_FENCE_MAX_STATIC_CLIENTS_INDEX) % + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT; + + /* invalid client id out of range of supported configurable sub-clients */ + if (offset >= drv_data->hw_fence_client_types[client_type].clients_num) + return HW_FENCE_CLIENT_MAX; + + client_id_priv = HW_FENCE_MAX_STATIC_CLIENTS_INDEX + offset; + + for (i = HW_FENCE_MAX_CLIENT_TYPE_STATIC; i < client_type; i++) + client_id_priv += drv_data->hw_fence_client_types[i].clients_num; + + return client_id_priv; +} + +int hw_fence_utils_get_queues_num(struct hw_fence_driver_data *drv_data, int client_id) +{ + if (!drv_data || client_id >= drv_data->clients_num || + !drv_data->hw_fence_client_queue_size[client_id].type) { + HWFNC_ERR("invalid access to client:%d queues_num\n", client_id); + return 0; + } + + return drv_data->hw_fence_client_queue_size[client_id].type->queues_num; +} + +int hw_fence_utils_get_skip_fctl_ref(struct hw_fence_driver_data *drv_data, int client_id) +{ + if (!drv_data || client_id >= drv_data->clients_num || + !drv_data->hw_fence_client_queue_size[client_id].type) { + HWFNC_ERR("invalid access to client:%d skip_fctl_ref\n", client_id); + return 0; + } + + return drv_data->hw_fence_client_queue_size[client_id].type->skip_fctl_ref; +} diff --git a/qcom/opensource/mm-drivers/hw_fence/src/hw_fence_ioctl.c b/qcom/opensource/mm-drivers/hw_fence/src/hw_fence_ioctl.c new file mode 100644 index 0000000000..f75fcdd874 --- /dev/null +++ b/qcom/opensource/mm-drivers/hw_fence/src/hw_fence_ioctl.c @@ -0,0 +1,580 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include + +#include "hw_fence_drv_priv.h" +#include "hw_fence_drv_utils.h" +#include "hw_fence_drv_ipc.h" +#include "hw_fence_drv_debug.h" +#include "hw_fence_drv_fence.h" + +#define HW_SYNC_IOCTL_COUNT ARRAY_SIZE(hw_sync_debugfs_ioctls) +#define HW_FENCE_ARRAY_SIZE 10 +#define HW_SYNC_IOC_MAGIC 'W' +#define HW_SYNC_IOC_REG_CLIENT _IOWR(HW_SYNC_IOC_MAGIC, 10, unsigned long) +#define HW_SYNC_IOC_UNREG_CLIENT _IOWR(HW_SYNC_IOC_MAGIC, 11, unsigned long) +#define HW_SYNC_IOC_CREATE_FENCE _IOWR(HW_SYNC_IOC_MAGIC, 12,\ + struct hw_fence_sync_create_data) +#define HW_SYNC_IOC_CREATE_FENCE_ARRAY _IOWR(HW_SYNC_IOC_MAGIC, 14,\ + struct hw_fence_array_sync_create_data) +#define HW_SYNC_IOC_REG_FOR_WAIT _IOWR(HW_SYNC_IOC_MAGIC, 16, int) +#define HW_SYNC_IOC_FENCE_SIGNAL _IOWR(HW_SYNC_IOC_MAGIC, 17, unsigned long) +#define HW_SYNC_IOC_FENCE_WAIT _IOWR(HW_SYNC_IOC_MAGIC, 18, int) +#define HW_SYNC_IOC_RESET_CLIENT _IOWR(HW_SYNC_IOC_MAGIC, 19, unsigned long) +#define HW_FENCE_IOCTL_NR(n) (_IOC_NR(n) - 2) +#define HW_IOCTL_DEF(ioctl, _func) \ + [HW_FENCE_IOCTL_NR(ioctl)] = { \ + .cmd = ioctl, \ + .func = _func, \ + .name = #ioctl \ + } + +/** + * struct hw_sync_obj - per client hw sync object. + * @context: context id used to create fences. + * @client_id: to uniquely represent client. + * @client_handle: Pointer to the structure holding the resources + * allocated to the client. + * @mem_descriptor: Memory descriptor of the queue allocated by the + * hardware fence driver for each client during register. + */ +struct hw_sync_obj { + u64 context; + int client_id; + void *client_handle; + struct msm_hw_fence_mem_addr mem_descriptor; +}; + +/** + * struct hw_fence_sync_create_data - data used in creating fences. + * @seqno: sequence number. 
+ * @incr_context: if set, then the context would be incremented. + * @fence: returns the fd of the new sync_file with the created fence. + * @hash: fence hash + */ +struct hw_fence_sync_create_data { + u64 seqno; + bool incr_context; + __s32 fence; + u64 hash; +}; + +/** + * struct hw_fence_array_sync_create_data - data used in creating multiple fences. + * @seqno: sequence number used to create fence array. + * @num_fences: number of fence fds received. + * @fences: array of fence fds. + * @fence_array_fd: fd of fence array. + */ +struct hw_fence_array_sync_create_data { + u64 seqno; + int num_fences; + u64 fences[HW_FENCE_ARRAY_SIZE]; + __s32 fence_array_fd; +}; + +/** + * struct hw_fence_sync_signal_data - data used to signal fences. + * @hash: hash of the fence. + * @error_flag: error flag + */ +struct hw_fence_sync_signal_data { + u64 hash; + u32 error_flag; +}; + +/** + * struct hw_fence_sync_wait_data - data used to wait on fences. + * @fence: fence fd. + * @timeout_ms: fence wait time out. + */ +struct hw_fence_sync_wait_data { + __s32 fence; + u64 timeout_ms; +}; + +/** + * struct hw_fence_sync_reset_data - data used to reset client. + * @client_id: client id. + * @reset_flag: reset flag + */ +struct hw_fence_sync_reset_data { + int client_id; + u32 reset_flag; +}; + +typedef long hw_fence_ioctl_t(struct hw_sync_obj *obj, unsigned long arg); + +/** + * struct hw_sync_ioctl_def - hw_sync driver ioctl entry + * @cmd: ioctl command number, without flags + * @func: handler for this ioctl + * @name: user-readable name for debug output + */ +struct hw_sync_ioctl_def { + unsigned int cmd; + hw_fence_ioctl_t *func; + const char *name; +}; + +static bool _is_valid_client(struct hw_sync_obj *obj) +{ + if (!obj) + return false; + + if (obj->client_id < HW_FENCE_CLIENT_ID_VAL0 || obj->client_id > HW_FENCE_CLIENT_ID_VAL6) { + HWFNC_ERR("invalid client_id:%d min:%d max:%d\n", obj->client_id, + HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_ID_VAL6); + return false; + } + + return true; +} + +static int _get_client_id(struct hw_sync_obj *obj, unsigned long arg) +{ + int client_id; + + if (copy_from_user(&client_id, (void __user *)arg, sizeof(client_id))) + return -EFAULT; + + if (!obj) + return -EINVAL; + + if (client_id < HW_FENCE_CLIENT_ID_VAL0 || client_id > HW_FENCE_CLIENT_ID_VAL6) { + HWFNC_ERR("invalid client_id:%d min:%d max:%d\n", client_id, + HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_ID_VAL6); + return -EINVAL; + } + + return client_id; +} + +static void *_hw_sync_get_fence(int fd) +{ + return fd >= 0 ? 
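/* sync_file_get_fence() takes a dma_fence reference; callers drop it with dma_fence_put() */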
sync_file_get_fence(fd) : NULL; +} + +static int hw_sync_debugfs_open(struct inode *inode, struct file *file) +{ + struct hw_sync_obj *obj; + + obj = kzalloc(sizeof(*obj), GFP_KERNEL); + if (!obj) + return -ENOMEM; + + obj->context = dma_fence_context_alloc(1); + file->private_data = obj; + + return 0; +} + +static int hw_sync_debugfs_release(struct inode *inode, struct file *file) +{ + struct hw_sync_obj *obj = file->private_data; + + if (!obj) + return -EINVAL; + + kfree(obj); + + return 0; +} + +static long hw_sync_ioctl_reg_client(struct hw_sync_obj *obj, unsigned long arg) +{ + int client_id = _get_client_id(obj, arg); + + if (IS_ERR(&client_id)) { + return client_id; + } else if (obj->client_handle) { + HWFNC_ERR("client:%d already registered as validation client\n", client_id); + return -EINVAL; + } + + obj->client_id = client_id; + obj->client_handle = msm_hw_fence_register(obj->client_id, &obj->mem_descriptor); + if (IS_ERR_OR_NULL(obj->client_handle)) + return -EINVAL; + + return 0; +} + +static long hw_sync_ioctl_unreg_client(struct hw_sync_obj *obj, unsigned long arg) +{ + int client_id = _get_client_id(obj, arg); + + if (IS_ERR(&client_id)) { + return client_id; + } else if (client_id != obj->client_id) { + HWFNC_ERR("deregistering hw-fence client %d with invalid client_id arg:%d\n", + obj->client_id, client_id); + return -EINVAL; + } + + return msm_hw_fence_deregister(obj->client_handle); +} + +static long hw_sync_ioctl_create_fence(struct hw_sync_obj *obj, unsigned long arg) +{ + struct msm_hw_fence_create_params params; + struct hw_fence_sync_create_data data; + struct hw_dma_fence *fence; + struct dma_fence *dma_fence; + u64 hash; + struct sync_file *sync_file; + int fd, ret; + + if (!_is_valid_client(obj)) { + return -EINVAL; + } else if (IS_ERR_OR_NULL(obj->client_handle)) { + HWFNC_ERR("client:%d is not register as validation client\n", obj->client_id); + return -EINVAL; + } + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + + dma_fence = hw_dma_fence_init(obj->client_handle, obj->context, data.seqno); + if (IS_ERR_OR_NULL(dma_fence)) + return -EINVAL; + fence = (struct hw_dma_fence *)dma_fence; + + params.fence = dma_fence; + params.handle = &hash; + + /* create hw fence */ + ret = msm_hw_fence_create(obj->client_handle, ¶ms); + if (ret) { + HWFNC_ERR("failed to create hw_fence for client:%d ctx:%llu seqno:%llu\n", + obj->client_id, obj->context, data.seqno); + dma_fence_put(&fence->base); + return -EINVAL; + } + + /* keep handle in dma_fence, to destroy hw-fence during release */ + fence->client_handle = obj->client_handle; + + if (data.incr_context) + obj->context = dma_fence_context_alloc(1); + + /* create fd */ + fd = get_unused_fd_flags(0); + if (fd < 0) { + HWFNC_ERR("failed to get fd for client:%d\n", obj->client_id); + dma_fence_put(&fence->base); + return fd; + } + + sync_file = sync_file_create(&fence->base); + if (sync_file == NULL) { + HWFNC_ERR("couldn't create fence fd, %d\n", fd); + dma_fence_put(&fence->base); + ret = -EINVAL; + goto exit; + } + + /* Decrement the refcount that sync_file_create increments */ + dma_fence_put(&fence->base); + + data.fence = fd; + data.hash = hash; + if (copy_to_user((void __user *)arg, &data, sizeof(data))) { + dma_fence_put(&fence->base); + fput(sync_file->file); + ret = -EFAULT; + goto exit; + } + + fd_install(fd, sync_file->file); + + return 0; + +exit: + put_unused_fd(fd); + return ret; +} + +static void _put_child_fences(int i, struct dma_fence **fences) +{ + int fence_idx; + + for 
(fence_idx = i; fence_idx >= 0 ; fence_idx--) + dma_fence_put(fences[i]); +} + +static long hw_sync_ioctl_create_fence_array(struct hw_sync_obj *obj, unsigned long arg) +{ + struct dma_fence_array *fence_array; + struct hw_fence_array_sync_create_data data; + struct dma_fence **fences = NULL; + struct sync_file *sync_file; + int num_fences, i, fd, ret; + struct hw_dma_fence *fence; + + if (!_is_valid_client(obj)) { + return -EINVAL; + } else if (IS_ERR_OR_NULL(obj->client_handle)) { + HWFNC_ERR("client:%d is not register as validation client\n", obj->client_id); + return -EINVAL; + } + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + + num_fences = data.num_fences; + if (num_fences > HW_FENCE_ARRAY_SIZE) { + HWFNC_ERR("Number of fences: %d is greater than allowed size: %d\n", + num_fences, HW_FENCE_ARRAY_SIZE); + return -EINVAL; + } + + fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL); + if (!fences) { + return -ENOMEM; + } + + for (i = 0; i < num_fences; i++) { + fd = data.fences[i]; + if (fd <= 0) { + kfree(fences); + return -EINVAL; + } + fence = (struct hw_dma_fence *)_hw_sync_get_fence(fd); + if (!fence) { + _put_child_fences(i-1, fences); + kfree(fences); + return -EINVAL; + } + fences[i] = &fence->base; + } + + /* create the fence array from array of dma fences */ + fence_array = dma_fence_array_create(num_fences, fences, obj->context, data.seqno, 0); + if (!fence_array) { + HWFNC_ERR("Error creating fence_array\n"); + /* decrease the refcount incremented for each child fences */ + for (i = 0; i < num_fences; i++) + dma_fence_put(fences[i]); + kfree(fences); + return -EINVAL; + } + + /* create fd */ + fd = get_unused_fd_flags(0); + if (fd <= 0) { + HWFNC_ERR("failed to get fd for client:%d\n", obj->client_id); + dma_fence_put(&fence_array->base); + return fd; + } + + sync_file = sync_file_create(&fence_array->base); + if (sync_file == NULL) { + HWFNC_ERR("couldn't create fence fd, %d\n", fd); + dma_fence_put(&fence_array->base); + kfree(fence_array); + ret = -EINVAL; + goto exit; + } + + /* Decrement the refcount that sync_file_create increments */ + dma_fence_put(&fence_array->base); + + data.fence_array_fd = fd; + if (copy_to_user((void __user *)arg, &data, sizeof(data))) { + fput(sync_file->file); + dma_fence_put(&fence_array->base); + ret = -EFAULT; + goto exit; + } + + fd_install(fd, sync_file->file); + + return 0; + +exit: + put_unused_fd(fd); + return ret; +} + +/* + * this IOCTL only supports receiving one fence as input-parameter, which can be + * either a "dma_fence" or a "dma_fence_array", but eventually we would expand + * this API to receive more fences + */ +static long hw_sync_ioctl_reg_for_wait(struct hw_sync_obj *obj, unsigned long arg) +{ + struct dma_fence *fence; + int ret, fd, num_fences = 1; + + if (!_is_valid_client(obj)) + return -EINVAL; + + if (copy_from_user(&fd, (void __user *)arg, sizeof(fd))) + return -EFAULT; + + fence = (struct dma_fence *)_hw_sync_get_fence(fd); + if (!fence) { + HWFNC_ERR("Invalid fence fd: %d\n", fd); + return -EINVAL; + } + + ret = msm_hw_fence_wait_update(obj->client_handle, &fence, num_fences, 1); + + /* Decrement the refcount that hw_sync_get_fence increments */ + dma_fence_put(fence); + + return ret; +} + +static long hw_sync_ioctl_fence_signal(struct hw_sync_obj *obj, unsigned long arg) +{ + struct msm_hw_fence_client *hw_fence_client; + struct hw_fence_sync_signal_data data; + int ret, tx_client, rx_client, signal_id; + + if (!_is_valid_client(obj)) { + return -EINVAL; + } else if 
(IS_ERR_OR_NULL(obj->client_handle)) { + HWFNC_ERR("invalid client handle for the client_id: %d\n", obj->client_id); + return -EINVAL; + } + + hw_fence_client = (struct msm_hw_fence_client *)obj->client_handle; + if (!hw_fence_client) { + HWFNC_ERR("invalid client handle\n"); + return -EINVAL; + } + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + + ret = msm_hw_fence_update_txq(obj->client_handle, data.hash, 0, data.error_flag); + if (ret) { + HWFNC_ERR("hw fence update txq has failed client_id: %d\n", obj->client_id); + return ret; + } + + signal_id = dbg_out_clients_signal_map_no_dpu[obj->client_id].ipc_signal_id; + if (signal_id < 0) + return -EINVAL; + + tx_client = hw_fence_client->ipc_client_pid; + rx_client = hw_fence_client->ipc_client_vid; + ret = msm_hw_fence_trigger_signal(obj->client_handle, tx_client, rx_client, signal_id); + if (ret) { + HWFNC_ERR("hw fence trigger signal has failed\n"); + return ret; + } + + return 0; +} + +static long hw_sync_ioctl_fence_wait(struct hw_sync_obj *obj, unsigned long arg) +{ + struct msm_hw_fence_client *hw_fence_client; + struct hw_fence_sync_wait_data data; + struct dma_fence *fence; + int fd, ret; + u32 error; + + if (!_is_valid_client(obj)) + return -EINVAL; + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + + fd = data.fence; + fence = (struct dma_fence *)_hw_sync_get_fence(fd); + if (!fence) { + HWFNC_ERR("Invalid fence fd: %d\n", fd); + return -EINVAL; + } + + hw_fence_client = (struct msm_hw_fence_client *)obj->client_handle; + if (!hw_fence_client) { + HWFNC_ERR("invalid client handle for fd:%d\n", fd); + /* Decrement the refcount that hw_sync_get_fence increments */ + dma_fence_put(fence); + return -EINVAL; + } + + ret = hw_fence_debug_wait_val(hw_fence_drv_data, hw_fence_client, fence, 0, 0, + data.timeout_ms, &error); + if (ret) + HWFNC_ERR("failed to wait for hw-fence client:%d ctx:%llu seq:%llu\n", + hw_fence_client->client_id, fence->context, fence->seqno); + + /* Decrement the refcount that hw_sync_get_fence increments */ + dma_fence_put(fence); + + return ret; +} + +static long hw_sync_ioctl_reset_client(struct hw_sync_obj *obj, unsigned long arg) +{ + int ret; + struct hw_fence_sync_reset_data data; + + if (!_is_valid_client(obj)) { + return -EINVAL; + } else if (IS_ERR_OR_NULL(obj->client_handle)) { + HWFNC_ERR("client:%d handle doesn't exists\n", obj->client_id); + return -EINVAL; + } + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + + ret = msm_hw_fence_reset_client(obj->client_handle, data.reset_flag); + if (ret) { + HWFNC_ERR("hw fence reset client has failed\n"); + return ret; + } + + return 0; +} + +static const struct hw_sync_ioctl_def hw_sync_debugfs_ioctls[] = { + HW_IOCTL_DEF(HW_SYNC_IOC_REG_CLIENT, hw_sync_ioctl_reg_client), + HW_IOCTL_DEF(HW_SYNC_IOC_UNREG_CLIENT, hw_sync_ioctl_unreg_client), + HW_IOCTL_DEF(HW_SYNC_IOC_CREATE_FENCE, hw_sync_ioctl_create_fence), + HW_IOCTL_DEF(HW_SYNC_IOC_CREATE_FENCE_ARRAY, hw_sync_ioctl_create_fence_array), + HW_IOCTL_DEF(HW_SYNC_IOC_REG_FOR_WAIT, hw_sync_ioctl_reg_for_wait), + HW_IOCTL_DEF(HW_SYNC_IOC_FENCE_SIGNAL, hw_sync_ioctl_fence_signal), + HW_IOCTL_DEF(HW_SYNC_IOC_FENCE_WAIT, hw_sync_ioctl_fence_wait), + HW_IOCTL_DEF(HW_SYNC_IOC_RESET_CLIENT, hw_sync_ioctl_reset_client) +}; + +static long hw_sync_debugfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct hw_sync_obj *obj = file->private_data; + int num = HW_FENCE_IOCTL_NR(cmd); + hw_fence_ioctl_t 
*func; + + if (num >= HW_SYNC_IOCTL_COUNT) { + HWFNC_ERR("invalid ioctl num = %d\n", num); + return -EINVAL; + } + + func = (&hw_sync_debugfs_ioctls[num])->func; + if (unlikely(!func)) { + HWFNC_ERR("no function num = %d\n", num); + return -ENOTTY; + } + + return func(obj, arg); +} + +const struct file_operations hw_sync_debugfs_fops = { + .open = hw_sync_debugfs_open, + .release = hw_sync_debugfs_release, + .unlocked_ioctl = hw_sync_debugfs_ioctl, +}; diff --git a/qcom/opensource/mm-drivers/hw_fence/src/msm_hw_fence.c b/qcom/opensource/mm-drivers/hw_fence/src/msm_hw_fence.c new file mode 100644 index 0000000000..93d45886b5 --- /dev/null +++ b/qcom/opensource/mm-drivers/hw_fence/src/msm_hw_fence.c @@ -0,0 +1,972 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#if (KERNEL_VERSION(6, 1, 25) <= LINUX_VERSION_CODE) +#include +#endif +#include + +#include "hw_fence_drv_priv.h" +#include "hw_fence_drv_utils.h" +#include "hw_fence_drv_debug.h" +#include "hw_fence_drv_ipc.h" +#include "hw_fence_drv_fence.h" + +struct hw_fence_driver_data *hw_fence_drv_data; +#if IS_ENABLED(CONFIG_QTI_ENABLE_HW_FENCE_DEFAULT) +bool hw_fence_driver_enable = true; +#else +bool hw_fence_driver_enable; +#endif + +static int _set_power_vote_if_needed(struct hw_fence_driver_data *drv_data, + u32 client_id, bool state) +{ + int ret = 0; + +#if IS_ENABLED(CONFIG_DEBUG_FS) + if (drv_data->has_soccp && client_id >= HW_FENCE_CLIENT_ID_VAL0 && + client_id <= HW_FENCE_CLIENT_ID_VAL6) { + ret = hw_fence_utils_set_power_vote(drv_data, state); + } +#endif /* CONFIG_DEBUG_FS */ + + return ret; +} + +static void msm_hw_fence_client_destroy(struct kref *kref) +{ + struct msm_hw_fence_client *hw_fence_client = container_of(kref, + struct msm_hw_fence_client, kref); + hw_fence_cleanup_client(hw_fence_drv_data, hw_fence_client); +} + +void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, + struct msm_hw_fence_mem_addr *mem_descriptor) +{ + struct msm_hw_fence_client *hw_fence_client; + enum hw_fence_client_id client_id; + int ret; + + if (!hw_fence_driver_enable) + return ERR_PTR(-ENODEV); + + HWFNC_DBG_H("++ client_id_ext:%d\n", client_id_ext); + + ret = hw_fence_check_hw_fence_driver(hw_fence_drv_data); + if (ret) + return ERR_PTR(ret); + + if (client_id_ext >= HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("Invalid client_id_ext:%d\n", client_id_ext); + return ERR_PTR(-EINVAL); + } + + client_id = hw_fence_utils_get_client_id_priv(hw_fence_drv_data, client_id_ext); + if (client_id >= HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("Invalid params: client_id:%d client_id_ext:%d\n", + client_id, client_id_ext); + return ERR_PTR(-EINVAL); + } + + /* Alloc client handle */ + hw_fence_client = kzalloc(sizeof(*hw_fence_client), GFP_KERNEL); + if (!hw_fence_client) + return ERR_PTR(-ENOMEM); + kref_init(&hw_fence_client->kref); + + /* Avoid race condition if multiple-threads request same client at same time */ + mutex_lock(&hw_fence_drv_data->clients_register_lock); + if (hw_fence_drv_data->clients[client_id] && + kref_get_unless_zero(&hw_fence_drv_data->clients[client_id]->kref)) { + mutex_unlock(&hw_fence_drv_data->clients_register_lock); + HWFNC_DBG_INIT("client with id %d already registered\n", client_id); + kfree(hw_fence_client); + + /* Client already exists, return the pointer to the client and populate mem desc */ + hw_fence_client = hw_fence_drv_data->clients[client_id]; + + /* Init client 
memory descriptor */ + if (!IS_ERR_OR_NULL(mem_descriptor)) + memcpy(mem_descriptor, &hw_fence_client->mem_descriptor, + sizeof(struct msm_hw_fence_mem_addr)); + else + HWFNC_DBG_L("null mem descriptor, skipping copy\n"); + + return hw_fence_client; + } + + /* Mark client as registered */ + hw_fence_drv_data->clients[client_id] = hw_fence_client; + mutex_unlock(&hw_fence_drv_data->clients_register_lock); + + hw_fence_client->client_id = client_id; + hw_fence_client->client_id_ext = client_id_ext; + hw_fence_client->ipc_client_vid = + hw_fence_ipcc_get_client_virt_id(hw_fence_drv_data, client_id); + hw_fence_client->ipc_client_pid = + hw_fence_ipcc_get_client_phys_id(hw_fence_drv_data, client_id); + + if (hw_fence_client->ipc_client_vid <= 0 || hw_fence_client->ipc_client_pid <= 0) { + HWFNC_ERR("Failed to find client:%d ipc vid:%d pid:%d\n", client_id, + hw_fence_client->ipc_client_vid, hw_fence_client->ipc_client_pid); + ret = -EINVAL; + goto error; + } + + hw_fence_client->ipc_signal_id = hw_fence_ipcc_get_signal_id(hw_fence_drv_data, client_id); + if (hw_fence_client->ipc_signal_id < 0) { + HWFNC_ERR("Failed to find client:%d signal\n", client_id); + ret = -EINVAL; + goto error; + } + + hw_fence_client->update_rxq = hw_fence_ipcc_needs_rxq_update(hw_fence_drv_data, client_id); + hw_fence_client->signaled_update_rxq = + hw_fence_ipcc_signaled_needs_rxq_update(hw_fence_drv_data, client_id); + hw_fence_client->signaled_send_ipc = hw_fence_ipcc_signaled_needs_ipc_irq(hw_fence_drv_data, + client_id); + hw_fence_client->txq_update_send_ipc = + hw_fence_ipcc_txq_update_needs_ipc_irq(hw_fence_drv_data, client_id); + + hw_fence_client->queues_num = hw_fence_utils_get_queues_num(hw_fence_drv_data, client_id); + if (!hw_fence_client->queues_num) { + HWFNC_ERR("client:%d invalid q_num:%d\n", client_id, hw_fence_client->queues_num); + ret = -EINVAL; + goto error; + } + if (hw_fence_client->queues_num < HW_FENCE_CLIENT_QUEUES) { + hw_fence_client->update_rxq = false; + hw_fence_client->signaled_update_rxq = false; + } + + hw_fence_client->skip_fctl_ref = hw_fence_utils_get_skip_fctl_ref(hw_fence_drv_data, + client_id); + + /* Alloc Client HFI Headers and Queues */ + ret = hw_fence_alloc_client_resources(hw_fence_drv_data, + hw_fence_client, mem_descriptor); + if (ret) + goto error; + + /* Initialize signal for communication with FenceCTL */ + ret = hw_fence_init_controller_signal(hw_fence_drv_data, hw_fence_client); + if (ret) + goto error; + + /* + * Update Fence Controller with the address of the Queues and + * the Fences Tables for this client + */ + ret = hw_fence_init_controller_resources(hw_fence_client); + if (ret) + goto error; + + hw_fence_client->context_id = dma_fence_context_alloc(1); + mutex_init(&hw_fence_client->error_cb_lock); + + HWFNC_DBG_INIT("Initialized ptr:0x%p client_id:%d q_num:%d ipc signal:%d vid:%d pid:%d\n", + hw_fence_client, hw_fence_client->client_id, hw_fence_client->queues_num, + hw_fence_client->ipc_signal_id, hw_fence_client->ipc_client_vid, + hw_fence_client->ipc_client_pid); + + HWFNC_DBG_INIT("update_rxq:%s signaled update_rxq:%s send_ipc:%s txq_update_send_ipc:%s\n", + hw_fence_client->update_rxq ? "true" : "false", + hw_fence_client->signaled_update_rxq ? "true" : "false", + hw_fence_client->signaled_send_ipc ? "true" : "false", + hw_fence_client->txq_update_send_ipc ? 
"true" : "false"); + +#if IS_ENABLED(CONFIG_DEBUG_FS) + init_waitqueue_head(&hw_fence_client->wait_queue); +#endif /* CONFIG_DEBUG_FS */ + + ret = _set_power_vote_if_needed(hw_fence_drv_data, hw_fence_client->client_id_ext, true); + if (ret) { + HWFNC_ERR("set soccp power vote failed, fail client:%u registration ret:%d\n", + hw_fence_client->client_id_ext, ret); + goto error; + } + + return (void *)hw_fence_client; +error: + + /* Free all the allocated resources */ + kref_put(&hw_fence_client->kref, msm_hw_fence_client_destroy); + + HWFNC_ERR("failed with error:%d\n", ret); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(msm_hw_fence_register); + +int msm_hw_fence_deregister(void *client_handle) +{ + struct msm_hw_fence_client *hw_fence_client; + bool destroyed_client; + u32 client_id; + int ret = 0; + + ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle); + if (ret) + return ret; + + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + client_id = hw_fence_client->client_id_ext; + + if (hw_fence_client->client_id >= hw_fence_drv_data->clients_num) { + HWFNC_ERR("Invalid client_id:%d\n", hw_fence_client->client_id); + return -EINVAL; + } + + HWFNC_DBG_H("+\n"); + + /* Free all the allocated resources */ + destroyed_client = kref_put(&hw_fence_client->kref, msm_hw_fence_client_destroy); + + if (destroyed_client) + ret = _set_power_vote_if_needed(hw_fence_drv_data, client_id, false); + if (ret) + HWFNC_ERR("remove soccp power vote failed, fail client:%u deregistration ret:%d\n", + hw_fence_client->client_id_ext, ret); + + HWFNC_DBG_H("-\n"); + + return 0; +} +EXPORT_SYMBOL_GPL(msm_hw_fence_deregister); + +int msm_hw_fence_create(void *client_handle, + struct msm_hw_fence_create_params *params) +{ + struct msm_hw_fence_client *hw_fence_client; + struct dma_fence_array *array; + struct dma_fence *fence; + int ret; + + ret = hw_fence_check_valid_fctl(hw_fence_drv_data, client_handle); + if (ret) + return ret; + + if (!params || !params->handle) { + HWFNC_ERR("Invalid input\n"); + return -EINVAL; + } + + HWFNC_DBG_H("+\n"); + + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + fence = (struct dma_fence *)params->fence; + + /* if not provided, create a dma-fence */ + if (!fence) { + fence = hw_fence_internal_dma_fence_create(hw_fence_drv_data, hw_fence_client, + params->handle); + if (IS_ERR_OR_NULL(fence)) { + HWFNC_ERR("failed to create internal dma-fence for client:%d err:%ld\n", + hw_fence_client->client_id, PTR_ERR(fence)); + return PTR_ERR(fence); + } + + return 0; + } + + /* Block any Fence-Array, we should only get individual fences */ + array = to_dma_fence_array(fence); + if (array) { + HWFNC_ERR("HW Fence must be created for individual fences\n"); + return -EINVAL; + } + + /* This Fence is already a HW-Fence */ + if (test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) { + HWFNC_ERR("DMA Fence already has HW Fence Flag set\n"); + return -EINVAL; + } + + /* Create the HW Fence, i.e. 
add entry in the Global Table for this Fence */ + ret = hw_fence_create(hw_fence_drv_data, hw_fence_client, (u64)fence, fence->context, + fence->seqno, params->handle); + if (ret) { + HWFNC_ERR("Error creating HW fence\n"); + return ret; + } + + ret = hw_fence_add_callback(hw_fence_drv_data, fence, *params->handle); + if (ret) { + HWFNC_ERR("Fail to add dma-fence signal cb client:%d ctx:%llu seq:%llu ret:%d\n", + hw_fence_client->client_id, fence->context, fence->seqno, ret); + /* release both refs, one held by fctl and one held by creating client */ + hw_fence_destroy_refcount(hw_fence_drv_data, *params->handle, + HW_FENCE_FCTL_REFCOUNT); + hw_fence_destroy_with_hash(hw_fence_drv_data, hw_fence_client, *params->handle); + + return ret; + } + + /* If no error, set the HW Fence Flag in the dma-fence */ + set_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags); + + HWFNC_DBG_H("-\n"); + + return ret; +} +EXPORT_SYMBOL_GPL(msm_hw_fence_create); + +int msm_hw_fence_destroy(void *client_handle, + struct dma_fence *fence) +{ + struct msm_hw_fence_client *hw_fence_client; + struct dma_fence_array *array; + int ret; + + ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle); + if (ret) + return ret; + + if (!fence) { + HWFNC_ERR("Invalid data\n"); + return -EINVAL; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + + HWFNC_DBG_H("+\n"); + + /* Block any Fence-Array, we should only get individual fences */ + array = to_dma_fence_array(fence); + if (array) { + HWFNC_ERR("HW Fence must be destroy for individual fences\n"); + return -EINVAL; + } + + /* This Fence not a HW-Fence */ + if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) { + HWFNC_ERR("DMA Fence is not a HW Fence flags:0x%lx\n", fence->flags); + return -EINVAL; + } + + if (dma_fence_is_hw_dma(fence)) { + HWFNC_ERR("deprecated api cannot destroy hw_dma_fence ctx:%llu seq:%llu\n", + fence->context, fence->seqno); + return -EINVAL; + } + + /* Destroy the HW Fence, i.e. remove entry in the Global Table for the Fence */ + ret = hw_fence_destroy(hw_fence_drv_data, hw_fence_client, (u64)fence, + fence->context, fence->seqno); + if (ret) { + HWFNC_ERR("Error destroying the HW fence\n"); + return ret; + } + + /* Clear the HW Fence Flag in the dma-fence */ + clear_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags); + + HWFNC_DBG_H("-\n"); + + return 0; +} +EXPORT_SYMBOL_GPL(msm_hw_fence_destroy); + +int msm_hw_fence_destroy_with_handle(void *client_handle, u64 handle) +{ + struct msm_hw_fence_client *hw_fence_client; + int ret; + + ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle); + if (ret) + return ret; + + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + + if (hw_fence_client->client_id >= hw_fence_drv_data->clients_num) { + HWFNC_ERR("Invalid client_id:%d\n", hw_fence_client->client_id); + return -EINVAL; + } + + HWFNC_DBG_H("+\n"); + + /* Destroy the HW Fence, i.e. 
remove entry in the Global Table for the Fence */ + ret = hw_fence_destroy_with_hash(hw_fence_drv_data, hw_fence_client, handle); + if (ret) { + HWFNC_ERR("Error destroying the HW fence handle:%llu client_id:%d\n", handle, + hw_fence_client->client_id); + return ret; + } + + HWFNC_DBG_H("-\n"); + + return 0; +} +EXPORT_SYMBOL_GPL(msm_hw_fence_destroy_with_handle); + +int msm_hw_fence_wait_update_v2(void *client_handle, + struct dma_fence **fence_list, u64 *handles, u64 *client_data_list, u32 num_fences, + bool create) +{ + struct msm_hw_fence_client *hw_fence_client; + struct dma_fence_array *array; + int i, j, destroy_ret, ret = 0; + enum hw_fence_client_data_id data_id; + + ret = hw_fence_check_valid_fctl(hw_fence_drv_data, client_handle); + if (ret) + return ret; + + if (!fence_list || !*fence_list) { + HWFNC_ERR("Invalid data\n"); + return -EINVAL; + } + + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + data_id = hw_fence_get_client_data_id(hw_fence_client->client_id_ext); + if (client_data_list && data_id >= HW_FENCE_MAX_CLIENTS_WITH_DATA) { + HWFNC_ERR("Populating non-NULL client_data_list with invalid client_id_ext:%d\n", + hw_fence_client->client_id_ext); + return -EINVAL; + } + + HWFNC_DBG_H("+\n"); + + /* Process all the list of fences */ + for (i = 0; i < num_fences; i++) { + struct dma_fence *fence = fence_list[i]; + u64 hash, client_data = 0; + + if (client_data_list) + client_data = client_data_list[i]; + + /* Process a Fence-Array */ + array = to_dma_fence_array(fence); + if (array) { + ret = hw_fence_process_fence_array(hw_fence_drv_data, hw_fence_client, + array, &hash, client_data); + if (ret) { + HWFNC_ERR("Failed to process FenceArray\n"); + goto error; + } + } else { + /* Process individual Fence */ + ret = hw_fence_process_fence(hw_fence_drv_data, hw_fence_client, fence, + &hash, client_data); + if (ret) { + HWFNC_ERR("Failed to process Fence\n"); + goto error; + } + } + + if (handles) + handles[i] = hash; + } + + HWFNC_DBG_H("-\n"); + + return 0; +error: + for (j = 0; j < i; j++) { + destroy_ret = hw_fence_destroy_with_hash(hw_fence_drv_data, hw_fence_client, + handles[j]); + if (destroy_ret) + HWFNC_ERR("Failed decr fence ref ctx:%llu seq:%llu h:%llu idx:%d ret:%d\n", + fence_list[j] ? fence_list[j]->context : -1, fence_list[j] ? + fence_list[j]->seqno : -1, handles[j], j, destroy_ret); + } + + return ret; +} +EXPORT_SYMBOL_GPL(msm_hw_fence_wait_update_v2); + +int msm_hw_fence_wait_update(void *client_handle, + struct dma_fence **fence_list, u32 num_fences, bool create) +{ + u64 handle; + int i, ret = 0; + + for (i = 0; i < num_fences; i++) { + ret = msm_hw_fence_wait_update_v2(client_handle, &fence_list[i], &handle, NULL, + 1, create); + + if (ret) { + HWFNC_ERR("Failed reg for wait on fence ctx:%llu seq:%llu idx:%d ret:%d\n", + fence_list[i] ? fence_list[i]->context : -1, + fence_list[i] ? fence_list[i]->seqno : -1, i, ret); + return ret; + } + + /* decrement reference on hw-fence acquired by msm_hw_fence_wait_update_v2 call */ + ret = msm_hw_fence_destroy_with_handle(client_handle, handle); + if (ret) { + HWFNC_ERR("Failed decr fence ref ctx:%llu seq:%llu h:%llu idx:%d ret:%d\n", + fence_list[i] ? fence_list[i]->context : -1, + fence_list[i] ? 
fence_list[i]->seqno : -1, handle, i, ret); + return ret; + } + } + + return ret; +} +EXPORT_SYMBOL_GPL(msm_hw_fence_wait_update); + +int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags) +{ + struct msm_hw_fence_client *hw_fence_client; + struct msm_hw_fence *hw_fences_tbl; + int i, ret; + + ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle); + if (ret) + return ret; + + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + hw_fences_tbl = hw_fence_drv_data->hw_fences_tbl; + + HWFNC_DBG_L("reset fences and queues for client:%d\n", hw_fence_client->client_id); + for (i = 0; i < hw_fence_drv_data->hw_fences_tbl_cnt; i++) + hw_fence_utils_cleanup_fence(hw_fence_drv_data, hw_fence_client, + &hw_fences_tbl[i], i, reset_flags); + + hw_fence_utils_reset_queues(hw_fence_drv_data, hw_fence_client); + + return 0; +} +EXPORT_SYMBOL_GPL(msm_hw_fence_reset_client); + +int msm_hw_fence_reset_client_by_id(enum hw_fence_client_id client_id_ext, u32 reset_flags) +{ + enum hw_fence_client_id client_id; + int ret; + + ret = hw_fence_check_hw_fence_driver(hw_fence_drv_data); + if (ret) + return ret; + + if (client_id_ext >= HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("Invalid client_id_ext:%d\n", client_id_ext); + return -EINVAL; + } + + client_id = hw_fence_utils_get_client_id_priv(hw_fence_drv_data, client_id_ext); + + if (client_id >= HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("Invalid client_id:%d client_id_ext:%d\n", client_id, client_id_ext); + return -EINVAL; + } + + return msm_hw_fence_reset_client(hw_fence_drv_data->clients[client_id], + reset_flags); +} +EXPORT_SYMBOL_GPL(msm_hw_fence_reset_client_by_id); + +int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 error) +{ + struct msm_hw_fence_client *hw_fence_client; + int ret; + + ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle); + if (ret) + return ret; + + if (handle >= hw_fence_drv_data->hw_fences_tbl_cnt) { + HWFNC_ERR("Invalid handle:%llu max:%d\n", handle, + hw_fence_drv_data->hw_fences_tbl_cnt); + return -EINVAL; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + + /* Write to Tx queue */ + hw_fence_update_queue(hw_fence_drv_data, hw_fence_client, + hw_fence_drv_data->hw_fences_tbl[handle].ctx_id, + hw_fence_drv_data->hw_fences_tbl[handle].seq_id, handle, + flags, 0, error, HW_FENCE_TX_QUEUE - 1); + + return 0; +} +EXPORT_SYMBOL_GPL(msm_hw_fence_update_txq); + + +int msm_hw_fence_update_txq_error(void *client_handle, u64 handle, u32 error, u32 update_flags) +{ + struct msm_hw_fence_client *hw_fence_client; + int ret; + + ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle); + if (ret) + return ret; + + if ((handle >= hw_fence_drv_data->hw_fences_tbl_cnt) || !error) { + HWFNC_ERR("Invalid fence handle:%llu max:%d or error:%d\n", + handle, hw_fence_drv_data->hw_fences_tbl_cnt, error); + return -EINVAL; + } + + if (update_flags != MSM_HW_FENCE_UPDATE_ERROR_WITH_MOVE) { + HWFNC_ERR("invalid flags:0x%x expected:0x%lx no support of in-place error update\n", + update_flags, MSM_HW_FENCE_UPDATE_ERROR_WITH_MOVE); + return -EINVAL; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + + /* Write to Tx queue */ + hw_fence_update_existing_txq_payload(hw_fence_drv_data, hw_fence_client, + handle, error); + + return 0; +} +EXPORT_SYMBOL_GPL(msm_hw_fence_update_txq_error); + +/* tx client has to be the physical, rx client virtual id*/ +int msm_hw_fence_trigger_signal(void *client_handle, + u32 tx_client_pid, u32 rx_client_vid, + u32 
signal_id) +{ + struct msm_hw_fence_client *hw_fence_client; + int ret; + + ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle); + if (ret) + return ret; + + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + + HWFNC_DBG_H("sending ipc for client:%d\n", hw_fence_client->client_id); + hw_fence_ipcc_trigger_signal(hw_fence_drv_data, tx_client_pid, + rx_client_vid, signal_id); + + return 0; +} +EXPORT_SYMBOL_GPL(msm_hw_fence_trigger_signal); + +int msm_hw_fence_register_error_cb(void *client_handle, msm_hw_fence_error_cb_t cb, void *data) +{ + struct msm_hw_fence_client *hw_fence_client; + int ret; + + ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle); + if (ret) + return ret; + + if (IS_ERR_OR_NULL(cb) || IS_ERR_OR_NULL(data)) { + HWFNC_ERR("Invalid params cb_func:0x%pK data:0x%pK\n", cb, data); + return -EINVAL; + } + + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + if (hw_fence_client->fence_error_cb) { + HWFNC_ERR("client_id:%d client_id_ext:%d already registered cb_func:%pK data:%pK\n", + hw_fence_client->client_id, hw_fence_client->client_id_ext, + hw_fence_client->fence_error_cb, hw_fence_client->fence_error_cb_userdata); + return -EINVAL; + } + + hw_fence_client->fence_error_cb_userdata = data; + hw_fence_client->fence_error_cb = cb; + + return 0; +} +EXPORT_SYMBOL_GPL(msm_hw_fence_register_error_cb); + +int msm_hw_fence_deregister_error_cb(void *client_handle) +{ + struct msm_hw_fence_client *hw_fence_client; + int ret = 0; + + ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle); + if (ret) + return ret; + + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + if (!mutex_trylock(&hw_fence_client->error_cb_lock)) { + HWFNC_ERR("client_id:%d is modifying or using fence_error_cb:0x%pK data:0x%pK\n", + hw_fence_client->client_id, hw_fence_client->fence_error_cb, + hw_fence_client->fence_error_cb_userdata); + return -EAGAIN; + } + + if (!hw_fence_client->fence_error_cb) { + HWFNC_ERR("client_id:%d client_id_ext:%d did not register cb:%pK data:%pK\n", + hw_fence_client->client_id, hw_fence_client->client_id_ext, + hw_fence_client->fence_error_cb, hw_fence_client->fence_error_cb_userdata); + ret = -EINVAL; + goto exit; + } + + hw_fence_client->fence_error_cb = NULL; + hw_fence_client->fence_error_cb_userdata = NULL; + +exit: + mutex_unlock(&hw_fence_client->error_cb_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(msm_hw_fence_deregister_error_cb); + +#if IS_ENABLED(CONFIG_DEBUG_FS) +int msm_hw_fence_dump_debug_data(void *client_handle, u32 dump_flags, u32 dump_clients_mask) +{ + struct msm_hw_fence_client *hw_fence_client; + int client_id, ret; + + ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle); + if (ret) + return ret; + + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + + if (dump_flags & MSM_HW_FENCE_DBG_DUMP_QUEUES) { + hw_fence_debug_dump_queues(hw_fence_drv_data, HW_FENCE_PRINTK, hw_fence_client); + + if (dump_clients_mask) + for (client_id = 0; client_id < HW_FENCE_CLIENT_MAX; client_id++) + if ((dump_clients_mask & (1 << client_id)) && + hw_fence_drv_data->clients[client_id]) + hw_fence_debug_dump_queues(hw_fence_drv_data, + HW_FENCE_PRINTK, + hw_fence_drv_data->clients[client_id]); + } + + if (dump_flags & MSM_HW_FENCE_DBG_DUMP_TABLE) + hw_fence_debug_dump_table(HW_FENCE_PRINTK, hw_fence_drv_data); + + if (dump_flags & MSM_HW_FENCE_DBG_DUMP_EVENTS) + hw_fence_debug_dump_events(HW_FENCE_PRINTK, hw_fence_drv_data); + + return 0; +} 
+EXPORT_SYMBOL_GPL(msm_hw_fence_dump_debug_data); + +int msm_hw_fence_dump_fence(void *client_handle, struct dma_fence *fence) +{ + struct msm_hw_fence_client *hw_fence_client; + struct msm_hw_fence *hw_fence; + u64 hash; + int ret; + + ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle); + if (ret) + return ret; + + if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) { + HWFNC_ERR("DMA Fence is not a HW Fence ctx:%llu seqno:%llu flags:0x%lx\n", + fence->context, fence->seqno, fence->flags); + return -EINVAL; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + + hw_fence = msm_hw_fence_find(hw_fence_drv_data, hw_fence_client, (u64)fence, fence->context, + fence->seqno, &hash); + if (!hw_fence) { + HWFNC_ERR("failed to find hw-fence client_id:%d fence:0x%pK ctx:%llu seqno:%llu\n", + hw_fence_client->client_id, fence, fence->context, fence->seqno); + return -EINVAL; + } + hw_fence_debug_dump_fence(HW_FENCE_PRINTK, hw_fence, hash, 0); + /* release refcount acquired by finding fence */ + msm_hw_fence_destroy_with_handle(client_handle, hash); + + return 0; +} +EXPORT_SYMBOL_GPL(msm_hw_fence_dump_fence); +#endif /* CONFIG_DEBUG_FS */ + +/* Function used for simulation purposes only. */ +int msm_hw_fence_driver_doorbell_sim(u64 db_mask) +{ + int ret; + + ret = hw_fence_check_hw_fence_driver(hw_fence_drv_data); + if (ret) + return ret; + + HWFNC_DBG_IRQ("db callback sim-mode flags:0x%llx qtime:%llu\n", + db_mask, hw_fence_get_qtime(hw_fence_drv_data)); + + hw_fence_utils_process_signaled_clients_mask(hw_fence_drv_data, db_mask); + + return 0; +} +EXPORT_SYMBOL_GPL(msm_hw_fence_driver_doorbell_sim); + +static int msm_hw_fence_probe_init(struct platform_device *pdev) +{ + int rc; + + HWFNC_DBG_H("+\n"); + + hw_fence_drv_data = kzalloc(sizeof(*hw_fence_drv_data), GFP_KERNEL); + if (!hw_fence_drv_data) + return -ENOMEM; + + dev_set_drvdata(&pdev->dev, hw_fence_drv_data); + hw_fence_drv_data->dev = &pdev->dev; + + if (hw_fence_driver_enable) { + /* Initialize HW Fence Driver resources */ + rc = hw_fence_init(hw_fence_drv_data); + if (rc) + goto error; + + mutex_init(&hw_fence_drv_data->clients_register_lock); + + /* set ready value so clients can register */ + hw_fence_drv_data->resources_ready = true; + } else { + /* check for presence of soccp */ + hw_fence_drv_data->has_soccp = + of_property_read_bool(hw_fence_drv_data->dev->of_node, "soccp_controller"); + + /* Allocate hw fence driver mem pool and share it with HYP */ + rc = hw_fence_utils_alloc_mem(hw_fence_drv_data); + if (rc) { + HWFNC_ERR_ONCE("failed to alloc base memory\n"); + goto error; + } + + HWFNC_DBG_INFO("hw fence driver not enabled\n"); + } + + HWFNC_DBG_H("-\n"); + + return rc; + +error: + dev_set_drvdata(&pdev->dev, NULL); + kfree(hw_fence_drv_data->ipc_clients_table); + kfree(hw_fence_drv_data->hw_fence_client_queue_size); + if (hw_fence_drv_data->cpu_addr_cookie) + dma_free_attrs(hw_fence_drv_data->dev, hw_fence_drv_data->size, + hw_fence_drv_data->cpu_addr_cookie, hw_fence_drv_data->res.start, + DMA_ATTR_NO_KERNEL_MAPPING); + kfree(hw_fence_drv_data); + hw_fence_drv_data = (void *) -EPROBE_DEFER; + + HWFNC_ERR_ONCE("error %d\n", rc); + + return rc; +} + +static int msm_hw_fence_probe(struct platform_device *pdev) +{ + int rc = -EINVAL; + + HWFNC_DBG_H("+\n"); + + if (!pdev) { + HWFNC_ERR("null platform dev\n"); + return -EINVAL; + } + + if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-hw-fence")) + rc = msm_hw_fence_probe_init(pdev); + if (rc) + goto err_exit; + + 
HWFNC_DBG_H("-\n"); + + return 0; + +err_exit: + HWFNC_ERR_ONCE("error %d\n", rc); + return rc; +} + +static int msm_hw_fence_remove(struct platform_device *pdev) +{ + struct hw_fence_soccp *soccp_props; + + HWFNC_DBG_H("+\n"); + + if (!pdev) { + HWFNC_ERR("null platform dev\n"); + return -EINVAL; + } + + hw_fence_drv_data = dev_get_drvdata(&pdev->dev); + if (!hw_fence_drv_data) { + HWFNC_ERR("null driver data\n"); + return -EINVAL; + } + soccp_props = &hw_fence_drv_data->soccp_props; + if (soccp_props->ssr_notifier) { + if (qcom_unregister_ssr_notifier(soccp_props->ssr_notifier, + &soccp_props->ssr_nb)) + HWFNC_ERR("failed to unregister soccp ssr notifier\n"); + } + + /* indicate listener thread should stop listening for interrupts from soccp */ + hw_fence_drv_data->has_soccp = false; + if (hw_fence_drv_data->soccp_listener_thread) + kthread_stop(hw_fence_drv_data->soccp_listener_thread); + + dev_set_drvdata(&pdev->dev, NULL); + + /* free memory allocations as part of hw_fence_drv_data */ + kfree(hw_fence_drv_data->ipc_clients_table); + kfree(hw_fence_drv_data->hw_fence_client_queue_size); + kfree(hw_fence_drv_data->hlos_key_tbl); + if (hw_fence_drv_data->cpu_addr_cookie) + dma_free_attrs(hw_fence_drv_data->dev, hw_fence_drv_data->size, + hw_fence_drv_data->cpu_addr_cookie, hw_fence_drv_data->res.start, + DMA_ATTR_NO_KERNEL_MAPPING); + kfree(hw_fence_drv_data); + hw_fence_drv_data = (void *) -EPROBE_DEFER; + + HWFNC_DBG_H("-\n"); + + return 0; +} + +static const struct of_device_id msm_hw_fence_dt_match[] = { + {.compatible = "qcom,msm-hw-fence"}, + {} +}; + +static struct platform_driver msm_hw_fence_driver = { + .probe = msm_hw_fence_probe, + .remove = msm_hw_fence_remove, + .driver = { + .name = "msm-hw-fence", + .of_match_table = of_match_ptr(msm_hw_fence_dt_match), + }, +}; + +static int __init msm_hw_fence_init(void) +{ + int rc = 0; + + HWFNC_DBG_H("+\n"); + + rc = platform_driver_register(&msm_hw_fence_driver); + if (rc) { + HWFNC_ERR("%s: failed to register platform driver\n", + __func__); + return rc; + } + + HWFNC_DBG_H("-\n"); + + return 0; +} + +static void __exit msm_hw_fence_exit(void) +{ + HWFNC_DBG_H("+\n"); + + platform_driver_unregister(&msm_hw_fence_driver); + + HWFNC_DBG_H("-\n"); +} + +module_param_named(enable, hw_fence_driver_enable, bool, 0600); +MODULE_PARM_DESC(enable, "Enable hardware fences"); + +module_init(msm_hw_fence_init); +module_exit(msm_hw_fence_exit); + +MODULE_DESCRIPTION("QTI HW Fence Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/mm-drivers/hw_fence/src/msm_hw_fence_synx_translation.c b/qcom/opensource/mm-drivers/hw_fence/src/msm_hw_fence_synx_translation.c new file mode 100644 index 0000000000..80abcf18ec --- /dev/null +++ b/qcom/opensource/mm-drivers/hw_fence/src/msm_hw_fence_synx_translation.c @@ -0,0 +1,525 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include "msm_hw_fence.h" +#include "hw_fence_drv_priv.h" +#include "hw_fence_drv_utils.h" +#include "hw_fence_drv_debug.h" +#include "hw_fence_drv_interop.h" + +/** + * MAX_SUPPORTED_DPU0: Maximum number of dpu clients supported + * MAX_SUPPORTED_TEST: Maximum number of validation clients supported + */ +#define MAX_SUPPORTED_DPU0 (HW_FENCE_CLIENT_ID_CTL5 - HW_FENCE_CLIENT_ID_CTL0) +#define MAX_SUPPORTED_TEST (HW_FENCE_CLIENT_ID_VAL6 - HW_FENCE_CLIENT_ID_VAL0) + +static enum hw_fence_client_id _get_hw_fence_client_id(enum synx_client_id synx_client_id) +{ + enum hw_fence_client_id hw_fence_client_id; + + switch ((int)synx_client_id) { + case SYNX_CLIENT_HW_FENCE_GFX_CTX0: + hw_fence_client_id = HW_FENCE_CLIENT_ID_CTX0; + break; + case SYNX_CLIENT_HW_FENCE_IPE_CTX0 ... SYNX_CLIENT_HW_FENCE_IPE_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT - 1: + hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_IPE_CTX0 + + HW_FENCE_CLIENT_ID_IPE; + break; + case SYNX_CLIENT_HW_FENCE_VID_CTX0 ... SYNX_CLIENT_HW_FENCE_VID_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT - 1: + hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_VID_CTX0 + + HW_FENCE_CLIENT_ID_VPU; + break; + case SYNX_CLIENT_HW_FENCE_DPU0_CTL0 ... SYNX_CLIENT_HW_FENCE_DPU0_CTL0 + MAX_SUPPORTED_DPU0: + hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_DPU0_CTL0 + + HW_FENCE_CLIENT_ID_CTL0; + break; + case SYNX_CLIENT_HW_FENCE_IPA_CTX0 ... SYNX_CLIENT_HW_FENCE_IPA_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT - 1: + hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_IPA_CTX0 + + HW_FENCE_CLIENT_ID_IPA; + break; + case SYNX_CLIENT_HW_FENCE_IFE0_CTX0 ... SYNX_CLIENT_HW_FENCE_IFE11_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT - 1: + hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_IFE0_CTX0 + + HW_FENCE_CLIENT_ID_IFE0; + break; + case SYNX_CLIENT_HW_FENCE_TEST_CTX0 ... 
SYNX_CLIENT_HW_FENCE_TEST_CTX0 + MAX_SUPPORTED_TEST: + hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_TEST_CTX0 + + HW_FENCE_CLIENT_ID_VAL0; + break; + default: + HWFNC_ERR("Unsupported hw-fence client for synx_id:%d\n", synx_client_id); + hw_fence_client_id = HW_FENCE_CLIENT_MAX; + break; + } + + return hw_fence_client_id; +} + +static bool is_hw_fence_client(enum synx_client_id synx_client_id) +{ + return synx_client_id >= SYNX_HW_FENCE_CLIENT_START + && synx_client_id < SYNX_HW_FENCE_CLIENT_END; +} + +struct synx_session *synx_hwfence_initialize(struct synx_initialization_params *params) +{ + struct synx_session *session = NULL; + enum hw_fence_client_id client_id; + void *client_handle; + + if (!hw_fence_driver_enable) + return ERR_PTR(-SYNX_INVALID); + + if (IS_ERR_OR_NULL(params)) { + HWFNC_ERR("invalid params:0x%pK\n", params); + return ERR_PTR(-SYNX_INVALID); + } + + client_id = _get_hw_fence_client_id(params->id); + if (!is_hw_fence_client(params->id) || client_id == HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("Initializing session for invalid synx_id:%d\n", params->id); + return ERR_PTR(-SYNX_INVALID); + } + + session = kzalloc(sizeof(struct synx_session), GFP_KERNEL); + if (!session) + return ERR_PTR(-SYNX_NOMEM); + + client_handle = msm_hw_fence_register(client_id, + (struct msm_hw_fence_mem_addr *)params->ptr); + if (IS_ERR_OR_NULL(client_handle)) { + kfree(session); + HWFNC_ERR("failed to initialize synx_id:%d ret:%ld\n", params->id, + PTR_ERR(client_handle)); + return ERR_PTR(hw_fence_interop_to_synx_status(PTR_ERR(client_handle))); + } + session->client = client_handle; + session->type = params->id; + HWFNC_DBG_INIT("initialized session synx_id:%d hw_fence_id:%d\n", params->id, client_id); + + return session; +} +EXPORT_SYMBOL_GPL(synx_hwfence_initialize); + +static int synx_hwfence_uninitialize(struct synx_session *session) +{ + int ret; + + if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type)) { + HWFNC_ERR("invalid session:0x%pK synx_id:%d\n", session, + IS_ERR_OR_NULL(session) ? -1 : session->type); + return -SYNX_INVALID; + } + + ret = msm_hw_fence_deregister(session->client); + if (ret) + HWFNC_ERR("Failed to deregister synx_id:%d ret:%d\n", session->type, ret); + else + kfree(session); + + return hw_fence_interop_to_synx_status(ret); +} + +static int synx_hwfence_create(struct synx_session *session, struct synx_create_params *params) +{ + int ret = 0; + struct msm_hw_fence_create_params hwfence_params; + u64 handle; + + if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type) || + IS_ERR_OR_NULL(params)) { + HWFNC_ERR("invalid session:0x%pK synx_id:%d params:0x%pK\n", session, + IS_ERR_OR_NULL(session) ? 
-1 : session->type, params); + return -SYNX_INVALID; + } + + if (IS_ERR_OR_NULL(params->h_synx) || (params->flags > SYNX_CREATE_MAX_FLAGS) || + (params->flags & SYNX_CREATE_CSL_FENCE)) { + HWFNC_ERR("synx_id:%d invalid create params h_synx:0x%pK flags:0x%x\n", + session->type, params->h_synx, params->flags); + return -SYNX_INVALID; + } + + /* if SYNX_CREATE_DMA_FENCE specified and no dma-fence, fail */ + if (!params->fence && (params->flags & SYNX_CREATE_DMA_FENCE)) { + HWFNC_ERR("synx_id:%d invalid fence:%pK params flags:0x%x\n", + session->type, params->fence, params->flags); + return -SYNX_INVALID; + } + + hwfence_params.fence = params->fence; + hwfence_params.handle = &handle; + ret = msm_hw_fence_create(session->client, &hwfence_params); + if (ret) { + HWFNC_ERR("synx_id:%d failed create fence:0x%pK flags:0x%x ret:%d\n", session->type, + params->fence, params->flags, ret); + return hw_fence_interop_to_synx_status(ret); + } + if (handle > U32_MAX) { + HWFNC_ERR("synx_id:%d fence handle:%llu would overflow h_synx\n", session->type, + handle); + hw_fence_destroy_refcount(hw_fence_drv_data, handle, HW_FENCE_FCTL_REFCOUNT); + msm_hw_fence_destroy_with_handle(session->client, handle); + return -SYNX_INVALID; + } + *params->h_synx = SYNX_HW_FENCE_HANDLE_FLAG | handle; + + return SYNX_SUCCESS; +} + +static int synx_hwfence_release(struct synx_session *session, u32 h_synx) +{ + int ret; + + if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type) || + !(h_synx & SYNX_HW_FENCE_HANDLE_FLAG)) { + HWFNC_ERR("invalid session:0x%pK synx_id:%d\n", session, + IS_ERR_OR_NULL(session) ? -1 : session->type); + return -SYNX_INVALID; + } + + h_synx &= HW_FENCE_HANDLE_INDEX_MASK; + ret = msm_hw_fence_destroy_with_handle(session->client, h_synx); + if (ret) + HWFNC_ERR("synx_id:%d failed to destroy fence h_synx:%u ret:%d\n", session->type, + h_synx, ret); + + return hw_fence_interop_to_synx_status(ret); +} + +static int synx_hwfence_signal(struct synx_session *session, u32 h_synx, + enum synx_signal_status status) +{ + struct msm_hw_fence_client *hw_fence_client; + u32 error; + int ret; + + if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type) || !session->client || + !(h_synx & SYNX_HW_FENCE_HANDLE_FLAG) || + !(status == SYNX_STATE_SIGNALED_SUCCESS || + status == SYNX_STATE_SIGNALED_CANCEL || + status > SYNX_STATE_SIGNALED_MAX)) { + HWFNC_ERR("invalid session:0x%pK synx_id:%d client:0x%pK h_synx:%u status:%u\n", + session, IS_ERR_OR_NULL(session) ? -1 : session->type, + IS_ERR_OR_NULL(session) ? 
NULL : session->client, h_synx, status); + return -SYNX_INVALID; + } + + error = hw_fence_interop_to_hw_fence_error(status); + h_synx &= HW_FENCE_HANDLE_INDEX_MASK; + ret = msm_hw_fence_update_txq(session->client, h_synx, 0, error); + if (ret) { + HWFNC_ERR("synx_id:%d failed to signal fence h_synx:%u status:%d ret:%d\n", + session->type, h_synx, status, ret); + goto error; + } + + hw_fence_client = (struct msm_hw_fence_client *)session->client; + if (hw_fence_client->txq_update_send_ipc) + hw_fence_ipcc_trigger_signal(hw_fence_drv_data, + hw_fence_client->ipc_client_pid, hw_fence_drv_data->ipcc_fctl_vid, + hw_fence_client->ipc_signal_id); + +error: + return hw_fence_interop_to_synx_status(ret); +} + +static int synx_hwfence_wait(struct synx_session *session, u32 h_synx, u64 timeout_ms) +{ + int ret = -EINVAL; + u32 error; + + if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type) || + !(h_synx & SYNX_HW_FENCE_HANDLE_FLAG)) { + HWFNC_ERR("invalid session:0x%pK synx_id:%d\n", session, + IS_ERR_OR_NULL(session) ? -1 : session->type); + return -SYNX_INVALID; + } + +#if IS_ENABLED(CONFIG_DEBUG_FS) + if (session->type >= SYNX_CLIENT_HW_FENCE_TEST_CTX0 + && session->type <= SYNX_CLIENT_HW_FENCE_TEST_CTX0 + MAX_SUPPORTED_TEST) + ret = hw_fence_debug_wait_val(hw_fence_drv_data, session->client, NULL, h_synx, + HW_FENCE_HANDLE_INDEX_MASK, timeout_ms, &error); +#endif /* CONFIG_DEBUG_FS */ + + if (ret) { + HWFNC_ERR("synx_id:%d failed to wait on fence h_synx:%u timeout_ms:%llu\n", + session->type, h_synx, timeout_ms); + return hw_fence_interop_to_synx_status(ret); + } + + return hw_fence_interop_to_synx_signal_status(MSM_HW_FENCE_FLAG_SIGNAL, error); +} + +int synx_hwfence_recover(enum synx_client_id id) +{ + int ret; + + if (!is_hw_fence_client(id)) { + HWFNC_ERR("invalid synx_id:%d\n", id); + return -SYNX_INVALID; + } + + ret = msm_hw_fence_reset_client_by_id(_get_hw_fence_client_id(id), + MSM_HW_FENCE_RESET_WITHOUT_DESTROY); + if (ret) + HWFNC_ERR("synx_id:%d failed to recover ret:%d\n", id, ret); + + return hw_fence_interop_to_synx_status(ret); +} +EXPORT_SYMBOL_GPL(synx_hwfence_recover); + +static void *synx_hwfence_get_fence(struct synx_session *session, u32 h_synx) +{ + if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type) || + !(h_synx & SYNX_HW_FENCE_HANDLE_FLAG)) { + HWFNC_ERR("invalid session:0x%pK synx_id:%d h_synx:%u\n", session, + IS_ERR_OR_NULL(session) ? -1 : session->type, h_synx); + return ERR_PTR(-SYNX_INVALID); + } + + return (void *)hw_fence_interop_get_fence(h_synx); +} + +static int synx_hwfence_get_status(struct synx_session *session, u32 h_synx) +{ + u64 flags; + u32 error; + int ret; + + if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type) || + !(h_synx & SYNX_HW_FENCE_HANDLE_FLAG)) { + HWFNC_ERR("invalid session:0x%pK synx_id:%d h_synx:%u\n", session, + IS_ERR_OR_NULL(session) ? 
-1 : session->type, h_synx); + return SYNX_STATE_INVALID; + } + + h_synx &= HW_FENCE_HANDLE_INDEX_MASK; + ret = hw_fence_get_flags_error(hw_fence_drv_data, h_synx, &flags, &error); + if (ret) { + HWFNC_ERR("Failed to get status for client:%d h_synx:%u\n", session->type, h_synx); + return SYNX_STATE_INVALID; + } + + return hw_fence_interop_to_synx_signal_status(flags, error); +} + +static int synx_hwfence_import_fence(void *client, struct synx_import_indv_params *params) +{ + struct dma_fence_array *array; + struct dma_fence *fence; + u64 handle; + int ret, i; + + ret = hw_fence_check_valid_fctl(hw_fence_drv_data, client); + if (ret) + return hw_fence_interop_to_synx_status(ret); + + fence = (struct dma_fence *)params->fence; + array = to_dma_fence_array(fence); + if (array) { + for (i = 0; i < array->num_fences; i++) { + if (dma_fence_is_array(array->fences[i])) { + HWFNC_ERR("nested fence arrays not supported idx:%d fence:0x%pK\n", + i, array->fences[i]); + ret = -SYNX_INVALID; + break; + } + + params->fence = array->fences[i]; + ret = hw_fence_interop_create_fence_from_import(params); + if (ret) { + HWFNC_ERR("failed to back dma_fence_array idx:%d fence:0x%pK\n", + i, array->fences[i]); + params->fence = fence; + break; + } + } + params->fence = fence; + } else { + ret = hw_fence_interop_create_fence_from_import(params); + } + + if (ret) { + HWFNC_ERR("failed to back dma-fence:0x%pK with hw-fence(s) ret:%d\n", + params->fence, ret); + return ret; + } + + ret = msm_hw_fence_wait_update_v2(client, (struct dma_fence **)¶ms->fence, &handle, + NULL, 1, true); + if (ret) { + HWFNC_ERR("failed to import fence:0x%pK flags:0x%x ret:%d\n", params->fence, + params->flags, ret); + goto error; + } + if (handle > U32_MAX) { + HWFNC_ERR("fence handle:%llu would overflow new_h_synx\n", handle); + msm_hw_fence_wait_update_v2(client, (struct dma_fence **)¶ms->fence, &handle, + NULL, 1, false); + return -SYNX_INVALID; + } + *params->new_h_synx = SYNX_HW_FENCE_HANDLE_FLAG | handle; + +error: + return hw_fence_interop_to_synx_status(ret); +} + +static int synx_hwfence_import_handle(void *client, struct synx_import_indv_params *params) +{ + struct synx_import_indv_params fence_params; + u32 h_synx; + int ret; + + if (!synx_interops.get_fence) { + HWFNC_ERR("invalid synx_get_fence:0x%pK\n", synx_interops.get_fence); + return -SYNX_INVALID; + } + h_synx = *(u32 *)params->fence; + if (h_synx & SYNX_HW_FENCE_HANDLE_FLAG) + fence_params.fence = hw_fence_interop_get_fence(h_synx); + else + fence_params.fence = synx_interops.get_fence(h_synx); + if (IS_ERR_OR_NULL(fence_params.fence)) { + HWFNC_ERR("failed to get native fence h_synx:%u ret:0x%pK\n", h_synx, + fence_params.fence); + return -SYNX_INVALID; + } + fence_params.new_h_synx = params->new_h_synx; + fence_params.flags = SYNX_IMPORT_DMA_FENCE; + ret = synx_hwfence_import_fence(client, &fence_params); + dma_fence_put(fence_params.fence); /* release dma-fence ref acquired by get_fence */ + + return ret; +} + +static int synx_hwfence_import_indv(void *client, struct synx_import_indv_params *params) +{ + if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(params) || + IS_ERR_OR_NULL(params->new_h_synx) || + !((params->flags & SYNX_IMPORT_DMA_FENCE) || + (params->flags & SYNX_IMPORT_SYNX_FENCE)) || + IS_ERR_OR_NULL(params->fence)) { + HWFNC_ERR("invalid client:0x%pK params:0x%pK h_synx:0x%pK flags:0x%x fence:0x%pK\n", + client, params, IS_ERR_OR_NULL(params) ? NULL : params->new_h_synx, + IS_ERR_OR_NULL(params) ? 0 : params->flags, + IS_ERR_OR_NULL(params) ? 
NULL : params->fence); + return -SYNX_INVALID; + } + + if (params->flags & SYNX_IMPORT_DMA_FENCE) + return synx_hwfence_import_fence(client, params); + else if (params->flags & SYNX_IMPORT_SYNX_FENCE) + return synx_hwfence_import_handle(client, params); + + HWFNC_ERR("invalid import flags:0x%x\n", params->flags); + + return -SYNX_INVALID; +} + +static int synx_hwfence_import_arr(void *client, struct synx_import_arr_params *params) +{ + int i, ret; + + if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(params) || !params->num_fences) { + HWFNC_ERR("invalid import arr client:0x%pK params:0x%pK num_fences:%u\n", client, + params, IS_ERR_OR_NULL(params) ? -1 : params->num_fences); + return -SYNX_INVALID; + } + + for (i = 0; i < params->num_fences; i++) { + ret = synx_hwfence_import_indv(client, ¶ms->list[i]); + if (ret) { + HWFNC_ERR("importing fence[%u] 0x%pK failed ret:%d\n", i, + params->list[i].fence, ret); + return ret; + } + } + + return SYNX_SUCCESS; +} + +int synx_hwfence_import(struct synx_session *session, struct synx_import_params *params) +{ + int ret; + + if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type) + || IS_ERR_OR_NULL(params)) { + HWFNC_ERR("invalid session:0x%pK synx_id:%d params:0x%pK\n", session, + IS_ERR_OR_NULL(session) ? -1 : session->type, params); + return -SYNX_INVALID; + } + + if (params->type == SYNX_IMPORT_ARR_PARAMS) + ret = synx_hwfence_import_arr(session->client, ¶ms->arr); + else + ret = synx_hwfence_import_indv(session->client, ¶ms->indv); + + if (ret) + HWFNC_ERR("synx_id:%d failed to import type:%s fences ret:%d\n", session->type, + (params->type == SYNX_IMPORT_ARR_PARAMS) ? "arr" : "indv", ret); + + return ret; +} + +int synx_hwfence_init_ops(struct synx_ops *hwfence_ops) +{ + if (IS_ERR_OR_NULL(hwfence_ops)) { + HWFNC_ERR("invalid ops\n"); + return -SYNX_INVALID; + } + + hwfence_ops->uninitialize = synx_hwfence_uninitialize; + hwfence_ops->create = synx_hwfence_create; + hwfence_ops->release = synx_hwfence_release; + hwfence_ops->signal = synx_hwfence_signal; + hwfence_ops->import = synx_hwfence_import; + hwfence_ops->get_fence = synx_hwfence_get_fence; + hwfence_ops->get_status = synx_hwfence_get_status; + hwfence_ops->wait = synx_hwfence_wait; + + return SYNX_SUCCESS; +} +EXPORT_SYMBOL_GPL(synx_hwfence_init_ops); + +int synx_hwfence_enable_resources(enum synx_client_id id, enum synx_resource_type resource, + bool enable) +{ + int ret; + + if (!hw_fence_driver_enable) + return -SYNX_INVALID; + + if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { + HWFNC_ERR("hw fence driver not ready\n"); + return -SYNX_INVALID; + } + + if (!is_hw_fence_client(id) || !(resource == SYNX_RESOURCE_SOCCP)) { + HWFNC_ERR("enabling hw-fence resources for invalid client id:%d res:%d enable:%d\n", + id, resource, enable); + return -SYNX_INVALID; + } + + if (!hw_fence_drv_data->has_soccp) + return SYNX_SUCCESS; + + ret = hw_fence_utils_set_power_vote(hw_fence_drv_data, enable); + if (ret) + HWFNC_ERR("Failed to vote for SOCCP state:%d\n", enable); + + return hw_fence_interop_to_synx_status(ret); +} +EXPORT_SYMBOL_GPL(synx_hwfence_enable_resources); diff --git a/qcom/opensource/mm-drivers/hw_fence/sun_defconfig b/qcom/opensource/mm-drivers/hw_fence/sun_defconfig new file mode 100644 index 0000000000..b39eb5efbe --- /dev/null +++ b/qcom/opensource/mm-drivers/hw_fence/sun_defconfig @@ -0,0 +1,2 @@ +CONFIG_QTI_HW_FENCE=y +CONFIG_QTI_HW_FENCE_USE_SYNX=y diff --git a/qcom/opensource/mm-drivers/mm_driver_board.mk 
b/qcom/opensource/mm-drivers/mm_driver_board.mk new file mode 100644 index 0000000000..7e18d8bc4e --- /dev/null +++ b/qcom/opensource/mm-drivers/mm_driver_board.mk @@ -0,0 +1,26 @@ +#SPDX-License-Identifier: GPL-2.0-only + +MM_DRV_DLKM_ENABLE := true +ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true) + ifeq ($(TARGET_KERNEL_DLKM_MM_DRV_OVERRIDE), false) + MM_DRV_DLKM_ENABLE := false + endif +endif + +ifeq ($(MM_DRV_DLKM_ENABLE), true) + ifneq ($(TARGET_BOARD_AUTO),true) + ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true) + BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko + BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko + BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/msm_ext_display.ko + ifneq ($(TARGET_BOARD_PLATFORM), taro) + BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko \ + $(KERNEL_MODULES_OUT)/msm_hw_fence.ko + BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko \ + $(KERNEL_MODULES_OUT)/msm_hw_fence.ko + BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/sync_fence.ko \ + $(KERNEL_MODULES_OUT)/msm_hw_fence.ko + endif + endif + endif +endif diff --git a/qcom/opensource/mm-drivers/mm_driver_product.mk b/qcom/opensource/mm-drivers/mm_driver_product.mk new file mode 100644 index 0000000000..bb98492d0a --- /dev/null +++ b/qcom/opensource/mm-drivers/mm_driver_product.mk @@ -0,0 +1,17 @@ + +PRODUCT_PACKAGES += msm_ext_display.ko + +MM_DRV_DLKM_ENABLE := true +ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true) + ifeq ($(TARGET_KERNEL_DLKM_MM_DRV_OVERRIDE), false) + MM_DRV_DLKM_ENABLE := false + endif +endif + +ifeq ($(MM_DRV_DLKM_ENABLE), true) + ifneq ($(TARGET_BOARD_PLATFORM), taro) + PRODUCT_PACKAGES += sync_fence.ko msm_hw_fence.ko + endif +endif + +DISPLAY_MM_DRIVER := msm_ext_display.ko sync_fence.ko msm_hw_fence.ko \ No newline at end of file diff --git a/qcom/opensource/mm-drivers/mm_drivers_kernel_headers.py b/qcom/opensource/mm-drivers/mm_drivers_kernel_headers.py new file mode 100644 index 0000000000..67885a9446 --- /dev/null +++ b/qcom/opensource/mm-drivers/mm_drivers_kernel_headers.py @@ -0,0 +1,95 @@ + # Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + # Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + # + # This program is free software; you can redistribute it and/or modify it + # under the terms of the GNU General Public License version 2 as published by + # the Free Software Foundation. + # + # This program is distributed in the hope that it will be useful, but WITHOUT + # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + # more details. + # + # You should have received a copy of the GNU General Public License along with + # this program. If not, see . 
+ +import argparse +import filecmp +import os +import re +import subprocess +import sys + +def run_headers_install(verbose, gen_dir, headers_install, unifdef, prefix, h): + if not h.startswith(prefix): + print('error: expected prefix [%s] on header [%s]' % (prefix, h)) + return False + + out_h = os.path.join(gen_dir, h[len(prefix):]) + (out_h_dirname, out_h_basename) = os.path.split(out_h) + env = os.environ.copy() + env["LOC_UNIFDEF"] = unifdef + cmd = ["sh", headers_install, h, out_h] + + if True: + print('run_headers_install: cmd is %s' % cmd) + + result = subprocess.call(cmd, env=env) + + if result != 0: + print('error: run_headers_install: cmd %s failed %d' % (cmd, result)) + return False + return True + +def gen_mm_drivers_headers(verbose, gen_dir, headers_install, unifdef, mm_drivers_include_uapi): + error_count = 0 + for h in mm_drivers_include_uapi: + mm_drivers_uapi_include_prefix = os.path.join(h.split('sync_fence/include/uapi')[0], + 'sync_fence', 'include', 'uapi') + os.sep + if not run_headers_install( + verbose, gen_dir, headers_install, unifdef, + mm_drivers_uapi_include_prefix, h): error_count += 1 + return error_count + +def main(): + """Parse command line arguments and perform top level control.""" + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + + # Arguments that apply to every invocation of this script. + parser.add_argument( + '--verbose', action='store_true', + help='Print output that describes the workings of this script.') + parser.add_argument( + '--header_arch', required=True, + help='The arch for which to generate headers.') + parser.add_argument( + '--gen_dir', required=True, + help='Where to place the generated files.') + parser.add_argument( + '--mm_drivers_include_uapi', required=True, nargs='*', + help='The list of techpack/*/include/uapi header files.') + parser.add_argument( + '--headers_install', required=True, + help='The headers_install tool to process input headers.') + parser.add_argument( + '--unifdef', + required=True, + help='The unifdef tool used by headers_install.') + + args = parser.parse_args() + + if args.verbose: + print('header_arch [%s]' % args.header_arch) + print('gen_dir [%s]' % args.gen_dir) + print('mm_drivers_include_uapi [%s]' % args.mm_drivers_include_uapi) + print('headers_install [%s]' % args.headers_install) + print('unifdef [%s]' % args.unifdef) + + return gen_mm_drivers_headers(args.verbose, args.gen_dir, + args.headers_install, args.unifdef, args.mm_drivers_include_uapi) + +if __name__ == '__main__': + sys.exit(main()) + diff --git a/qcom/opensource/mm-drivers/msm_ext_display/Android.mk b/qcom/opensource/mm-drivers/msm_ext_display/Android.mk new file mode 100644 index 0000000000..cef996a482 --- /dev/null +++ b/qcom/opensource/mm-drivers/msm_ext_display/Android.mk @@ -0,0 +1,43 @@ +LOCAL_PATH := $(call my-dir) +LOCAL_MODULE_DDK_BUILD := true +include $(CLEAR_VARS) + +# This makefile is only for DLKM +ifneq ($(findstring vendor,$(LOCAL_PATH)),) + +ifneq ($(findstring opensource,$(LOCAL_PATH)),) + MSM_EXT_DISPLAY_BLD_DIR := $(TOP)/vendor/qcom/opensource/mm-drivers/msm_ext_display +endif # opensource + +DLKM_DIR := $(TOP)/device/qcom/common/dlkm + +LOCAL_ADDITIONAL_DEPENDENCIES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) + +########################################################### +# This is set once per LOCAL_PATH, not per (kernel) module +KBUILD_OPTIONS := MSM_EXT_DISPLAY_ROOT=$(MSM_EXT_DISPLAY_BLD_DIR) +KBUILD_OPTIONS += 
MODNAME=msm_ext_display +KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM) + +########################################################### +include $(CLEAR_VARS) +# For incremental compilation +LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) +LOCAL_MODULE := msm-ext-disp-module-symvers +LOCAL_MODULE_STEM := Module.symvers +LOCAL_MODULE_KBUILD_NAME := Module.symvers +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) + +include $(DLKM_DIR)/Build_external_kernelmodule.mk +########################################################### +include $(CLEAR_VARS) +LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) +LOCAL_MODULE := msm_ext_display.ko +LOCAL_MODULE_KBUILD_NAME := msm_ext_display.ko +LOCAL_MODULE_TAGS := optional +LOCAL_MODULE_DEBUG_ENABLE := true +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) + +include $(DLKM_DIR)/Build_external_kernelmodule.mk +########################################################### +endif # DLKM check diff --git a/qcom/opensource/mm-drivers/msm_ext_display/BUILD.bazel b/qcom/opensource/mm-drivers/msm_ext_display/BUILD.bazel new file mode 100644 index 0000000000..0939b45466 --- /dev/null +++ b/qcom/opensource/mm-drivers/msm_ext_display/BUILD.bazel @@ -0,0 +1,16 @@ +load("//build/kernel/kleaf:kernel.bzl", "ddk_headers") +load(":define_msm_ext_display.bzl", "define_msm_ext_display") + +package( + default_visibility = [ + "//visibility:public" + ], +) + +ddk_headers( + name = "msm_ext_display_headers", + hdrs = glob(["include/*.h"]), + includes = ["include"] +) + +define_msm_ext_display() diff --git a/qcom/opensource/mm-drivers/msm_ext_display/Kbuild b/qcom/opensource/mm-drivers/msm_ext_display/Kbuild new file mode 100644 index 0000000000..a54149152b --- /dev/null +++ b/qcom/opensource/mm-drivers/msm_ext_display/Kbuild @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only + +include $(MSM_EXT_DISPLAY_ROOT)/config/kalamammdrivers.conf +LINUXINCLUDE += -include $(MSM_EXT_DISPLAY_ROOT)/config/kalamammdriversconf.h + +obj-m += msm_ext_display.o + +msm_ext_display-y := src/msm_ext_display.o + +CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" +EXTRA_CFLAGS += -Wformat-extra-args -Wstrict-prototypes -Wformat-insufficient-args \ + -Wformat-invalid-specifier -Wformat-zero-length -Wnonnull \ No newline at end of file diff --git a/qcom/opensource/mm-drivers/msm_ext_display/Kconfig b/qcom/opensource/mm-drivers/msm_ext_display/Kconfig new file mode 100644 index 0000000000..a7257e499a --- /dev/null +++ b/qcom/opensource/mm-drivers/msm_ext_display/Kconfig @@ -0,0 +1,4 @@ +config MSM_EXT_DISPLAY + bool "Enable msm_ext_display" + help + Enable msm_ext_display driver diff --git a/qcom/opensource/mm-drivers/msm_ext_display/Makefile b/qcom/opensource/mm-drivers/msm_ext_display/Makefile new file mode 100644 index 0000000000..31a8ce65bd --- /dev/null +++ b/qcom/opensource/mm-drivers/msm_ext_display/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 + +KBUILD_OPTIONS += MSM_EXT_DISPLAY_ROOT=$(KERNEL_SRC)/$(M)/../ + +all: modules + +modules_install: + $(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install + +%: + $(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS) + +clean: + rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers + rm -rf .tmp_versions diff --git a/qcom/opensource/mm-drivers/msm_ext_display/defconfig b/qcom/opensource/mm-drivers/msm_ext_display/defconfig new file mode 100644 index 0000000000..53017a5990 --- /dev/null +++ 
b/qcom/opensource/mm-drivers/msm_ext_display/defconfig @@ -0,0 +1 @@ +CONFIG_MSM_EXT_DISPLAY=y diff --git a/qcom/opensource/mm-drivers/msm_ext_display/define_msm_ext_display.bzl b/qcom/opensource/mm-drivers/msm_ext_display/define_msm_ext_display.bzl new file mode 100644 index 0000000000..3287983898 --- /dev/null +++ b/qcom/opensource/mm-drivers/msm_ext_display/define_msm_ext_display.bzl @@ -0,0 +1,31 @@ +load("//build/kernel/kleaf:kernel.bzl", "ddk_module", "ddk_submodule") +load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir") +load("//msm-kernel:target_variants.bzl", "get_all_variants") + +def _define_module(target, variant): + tv = "{}_{}".format(target, variant) + ddk_module( + name = "{}_msm_ext_display".format(tv), + srcs = ["src/msm_ext_display.c"], + out = "msm_ext_display.ko", + defconfig = "defconfig", + kconfig = "Kconfig", + deps = ["//msm-kernel:all_headers", + "//vendor/qcom/opensource/mm-drivers:mm_drivers_headers"], + kernel_build = "//msm-kernel:{}".format(tv), + ) + + copy_to_dist_dir( + name = "{}_msm_ext_display_dist".format(tv), + data = [":{}_msm_ext_display".format(tv)], + dist_dir = "out/target/product/{}/dlkm/lib/modules".format(target), + flat = True, + wipe_dist_dir = False, + allow_duplicate_filenames = False, + mode_overrides = {"**/*": "644"}, + log = "info", + ) + +def define_msm_ext_display(): + for (t, v) in get_all_variants(): + _define_module(t, v) diff --git a/qcom/opensource/mm-drivers/msm_ext_display/include/msm_ext_display.h b/qcom/opensource/mm-drivers/msm_ext_display/include/msm_ext_display.h new file mode 100644 index 0000000000..f6a8d10a3d --- /dev/null +++ b/qcom/opensource/mm-drivers/msm_ext_display/include/msm_ext_display.h @@ -0,0 +1,240 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _MSM_EXT_DISPLAY_H_ +#define _MSM_EXT_DISPLAY_H_ + +#include +#include +#include + +#define AUDIO_ACK_SET_ENABLE BIT(5) +#define AUDIO_ACK_ENABLE BIT(4) +#define AUDIO_ACK_CONNECT BIT(0) + +#define MSM_EXT_DISP_MAX_CODECS 2 + +/* + * Flags to be used with the HPD operation of the external display + * interface: + * MSM_EXT_DISP_HPD_AUDIO: audio will be routed to external display + * MSM_EXT_DISP_HPD_VIDEO: video will be routed to external display + */ +#define MSM_EXT_DISP_HPD_AUDIO BIT(0) +#define MSM_EXT_DISP_HPD_VIDEO BIT(1) + +/** + * struct ext_disp_cable_notify - cable notify handler structure + * @link: a link for the linked list + * @status: current status of HDMI/DP cable connection + * @hpd_notify: callback function to provide cable status + */ +struct ext_disp_cable_notify { + struct list_head link; + int status; + void (*hpd_notify)(struct ext_disp_cable_notify *h); +}; + +struct msm_ext_disp_audio_edid_blk { + u8 *audio_data_blk; + unsigned int audio_data_blk_size; /* in bytes */ + u8 *spk_alloc_data_blk; + unsigned int spk_alloc_data_blk_size; /* in bytes */ +}; + +struct msm_ext_disp_audio_setup_params { + u32 sample_rate_hz; + u32 num_of_channels; + u32 channel_allocation; + u32 level_shift; + bool down_mix; + u32 sample_present; +}; + +/* + * External Display identifier for use to determine which interface + * the audio driver is interacting with. 
+ */ +enum msm_ext_disp_type { + EXT_DISPLAY_TYPE_HDMI = EXTCON_DISP_HDMI, + EXT_DISPLAY_TYPE_DP = EXTCON_DISP_DP, + EXT_DISPLAY_TYPE_MAX = 0xFFFFFFFF +}; + +/* + * External Display cable state used by display interface to indicate + * connect/disconnect of interface. + */ +enum msm_ext_disp_cable_state { + EXT_DISPLAY_CABLE_DISCONNECT, + EXT_DISPLAY_CABLE_CONNECT, + EXT_DISPLAY_CABLE_STATE_MAX +}; + +/** + * External Display power state used by display interface to indicate + * power on/off of the interface. + */ +enum msm_ext_disp_power_state { + EXT_DISPLAY_POWER_OFF, + EXT_DISPLAY_POWER_ON, + EXT_DISPLAY_POWER_MAX +}; + +/** + * struct msm_ext_disp_codec_id - codec information + * @type: external display type + * @ctrl_id: controller id + * @stream_id: stream_id + */ +struct msm_ext_disp_codec_id { + enum msm_ext_disp_type type; + int ctrl_id; + int stream_id; +}; + +/** + * struct msm_ext_disp_intf_ops - operations exposed to display interface + * @audio_config: configures the audio operations exposed to codec driver + * @audio_notify: notifies the audio connection state to user modules. + * @video_notify: notifies the video connection state to user modules. + */ +struct msm_ext_disp_intf_ops { + int (*audio_config)(struct platform_device *pdev, + struct msm_ext_disp_codec_id *codec, + enum msm_ext_disp_cable_state state); + + int (*audio_notify)(struct platform_device *pdev, + struct msm_ext_disp_codec_id *codec, + enum msm_ext_disp_cable_state state); + + int (*video_notify)(struct platform_device *pdev, + struct msm_ext_disp_codec_id *codec, + enum msm_ext_disp_cable_state state); +}; + +/** + * struct msm_ext_disp_audio_codec_ops - operations exposed to audio codec + * @audio_info_setup: configure audio on interface + * @get_audio_edid_blk: retrieve audio edid block + * @cable_status: cable connected/disconnected + * @get_intf_id: id of connected interface + * @teardown_done: audio session teardown done by qdsp + * @acknowledge: acknowledge audio status received by user modules + * @ready: notify audio when codec driver is ready. 
+ */ +struct msm_ext_disp_audio_codec_ops { + int (*audio_info_setup)(struct platform_device *pdev, + struct msm_ext_disp_audio_setup_params *params); + int (*get_audio_edid_blk)(struct platform_device *pdev, + struct msm_ext_disp_audio_edid_blk *blk); + int (*cable_status)(struct platform_device *pdev, u32 vote); + int (*get_intf_id)(struct platform_device *pdev); + void (*teardown_done)(struct platform_device *pdev); + int (*acknowledge)(struct platform_device *pdev, u32 ack); + int (*ready)(struct platform_device *pdev); +}; + +/** + * struct msm_ext_disp_init_data - data needed to register a display interface + * @type: external display type + * @intf_ops: external display interface operations + * @codec_ops: audio codec operations + * @pdev: platform device instance of the interface driver + * @intf_data: interface specific data + */ +struct msm_ext_disp_init_data { + struct msm_ext_disp_codec_id codec; + struct msm_ext_disp_intf_ops intf_ops; + struct msm_ext_disp_audio_codec_ops codec_ops; + struct platform_device *pdev; + void *intf_data; +}; + +/** + * struct msm_ext_disp_data - data needed by interface modules + * @intf_pdev: platform device instance of the interface + * @intf_data: data related to interface module + */ +struct msm_ext_disp_data { + struct platform_device *intf_pdev; + void *intf_data; +}; + +#if IS_ENABLED(CONFIG_MSM_EXT_DISPLAY) +/** + * msm_ext_disp_register_audio_codec() - audio codec registration + * @pdev: platform device pointer + * @codec_ops: audio codec operations + */ +int msm_ext_disp_register_audio_codec(struct platform_device *pdev, + struct msm_ext_disp_audio_codec_ops *ops); + +/** + * msm_ext_disp_select_audio_codec() - select audio codec + * @pdev: platform device pointer + * @codec: codec id information + */ +int msm_ext_disp_select_audio_codec(struct platform_device *pdev, + struct msm_ext_disp_codec_id *codec); + +/** + * msm_hdmi_register_audio_codec() - wrapper for hdmi audio codec + * registration + * @pdev: platform device pointer + * @codec_ops: audio codec operations + */ +int msm_hdmi_register_audio_codec(struct platform_device *pdev, + struct msm_ext_disp_audio_codec_ops *ops); + +/** + * msm_ext_disp_register_intf() - display interface registration + * @init_data: data needed to register the display interface + */ +int msm_ext_disp_register_intf(struct platform_device *pdev, + struct msm_ext_disp_init_data *init_data); + +/** + * msm_ext_disp_deregister_intf() - display interface deregistration + * @init_data: data needed to deregister the display interface + */ +int msm_ext_disp_deregister_intf(struct platform_device *pdev, + struct msm_ext_disp_init_data *init_data); + +#else +static inline int msm_ext_disp_register_audio_codec( + struct platform_device *pdev, + struct msm_ext_disp_audio_codec_ops *ops) +{ + return 0; +} + +static inline int msm_ext_disp_select_audio_codec(struct platform_device *pdev, + struct msm_ext_disp_codec_id *codec) +{ + return 0; +} + +static inline int msm_hdmi_register_audio_codec(struct platform_device *pdev, + struct msm_ext_disp_audio_codec_ops *ops) +{ + return 0; +} + +static inline int msm_ext_disp_register_intf(struct platform_device *pdev, + struct msm_ext_disp_init_data *init_data) +{ + return 0; +} + +static inline int msm_ext_disp_deregister_intf(struct platform_device *pdev, + struct msm_ext_disp_init_data *init_data) +{ + return 0; +} +#endif + +#endif /*_MSM_EXT_DISPLAY_H_*/ diff --git a/qcom/opensource/mm-drivers/msm_ext_display/src/msm_ext_display.c 
b/qcom/opensource/mm-drivers/msm_ext_display/src/msm_ext_display.c new file mode 100644 index 0000000000..a239fc13d1 --- /dev/null +++ b/qcom/opensource/mm-drivers/msm_ext_display/src/msm_ext_display.c @@ -0,0 +1,702 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct msm_ext_disp_list { + struct msm_ext_disp_init_data *data; + struct list_head list; +}; + +struct msm_ext_disp { + struct msm_ext_disp_data ext_disp_data; + struct platform_device *pdev; + struct msm_ext_disp_codec_id current_codec; + struct msm_ext_disp_audio_codec_ops *ops; + struct extcon_dev *audio_sdev[MSM_EXT_DISP_MAX_CODECS]; + bool audio_session_on; + struct list_head display_list; + struct mutex lock; + bool update_audio; +}; + +static const unsigned int msm_ext_disp_supported_cable[] = { + EXTCON_DISP_DP, + EXTCON_DISP_HDMI, + EXTCON_NONE, +}; + +static int msm_ext_disp_extcon_register(struct msm_ext_disp *ext_disp, int id) +{ + int ret = 0; + + if (!ext_disp || !ext_disp->pdev || id >= MSM_EXT_DISP_MAX_CODECS) { + pr_err("invalid params\n"); + return -EINVAL; + } + + ext_disp->audio_sdev[id] = devm_extcon_dev_allocate( + &ext_disp->pdev->dev, + msm_ext_disp_supported_cable); + if (IS_ERR(ext_disp->audio_sdev[id])) + return PTR_ERR(ext_disp->audio_sdev[id]); + + ret = devm_extcon_dev_register(&ext_disp->pdev->dev, + ext_disp->audio_sdev[id]); + if (ret) { + pr_err("audio registration failed\n"); + return ret; + } + + pr_debug("extcon registration done\n"); + + return ret; +} + +static void msm_ext_disp_extcon_unregister(struct msm_ext_disp *ext_disp, + int id) +{ + if (!ext_disp || !ext_disp->pdev || id >= MSM_EXT_DISP_MAX_CODECS) { + pr_err("Invalid params\n"); + return; + } + + devm_extcon_dev_unregister(&ext_disp->pdev->dev, + ext_disp->audio_sdev[id]); +} + +static const char *msm_ext_disp_name(enum msm_ext_disp_type type) +{ + switch (type) { + case EXT_DISPLAY_TYPE_HDMI: + return "EXT_DISPLAY_TYPE_HDMI"; + case EXT_DISPLAY_TYPE_DP: + return "EXT_DISPLAY_TYPE_DP"; + default: return "???"; + } +} + +static int msm_ext_disp_add_intf_data(struct msm_ext_disp *ext_disp, + struct msm_ext_disp_init_data *data) +{ + struct msm_ext_disp_list *node; + + if (!ext_disp || !data) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + + node->data = data; + + list_add(&node->list, &ext_disp->display_list); + + pr_debug("Added new display (%s) ctld (%d) stream (%d)\n", + msm_ext_disp_name(data->codec.type), + data->codec.ctrl_id, data->codec.stream_id); + + return 0; +} + +static int msm_ext_disp_remove_intf_data(struct msm_ext_disp *ext_disp, + struct msm_ext_disp_init_data *data) +{ + struct msm_ext_disp_list *node; + struct list_head *pos = NULL; + + if (!ext_disp || !data) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + list_for_each(pos, &ext_disp->display_list) { + node = list_entry(pos, struct msm_ext_disp_list, list); + if (node->data == data) { + list_del(pos); + pr_debug("Deleted the intf data\n"); + kfree(node); + return 0; + } + } + + pr_debug("Intf data not present for delete op\n"); + + return 0; +} + +static int msm_ext_disp_get_intf_data(struct msm_ext_disp *ext_disp, + struct msm_ext_disp_codec_id *codec, 
+ struct msm_ext_disp_init_data **data) +{ + int ret = 0; + struct msm_ext_disp_list *node; + struct list_head *position = NULL; + + if (!ext_disp || !data || !codec) { + pr_err("Invalid params\n"); + ret = -EINVAL; + goto end; + } + + *data = NULL; + list_for_each(position, &ext_disp->display_list) { + node = list_entry(position, struct msm_ext_disp_list, list); + if (node->data->codec.type == codec->type && + node->data->codec.stream_id == codec->stream_id && + node->data->codec.ctrl_id == codec->ctrl_id) { + *data = node->data; + break; + } + } + + if (!*data) + ret = -ENODEV; +end: + return ret; +} + +static int msm_ext_disp_process_audio(struct msm_ext_disp *ext_disp, + struct msm_ext_disp_codec_id *codec, + enum msm_ext_disp_cable_state new_state) +{ + int ret = 0; + int state; + struct extcon_dev *audio_sdev; + + if (!ext_disp->ops) { + pr_err("codec not registered, skip notification\n"); + ret = -EPERM; + goto end; + } + + audio_sdev = ext_disp->audio_sdev[codec->stream_id]; + + state = extcon_get_state(audio_sdev, codec->type); + if (state == !!new_state) { + ret = -EEXIST; + pr_debug("same state\n"); + goto end; + } + + ret = extcon_set_state_sync(audio_sdev, + codec->type, !!new_state); + if (ret) + pr_err("Failed to set state. Error = %d\n", ret); + else + pr_debug("state changed to %d\n", new_state); + +end: + return ret; +} + +static struct msm_ext_disp *msm_ext_disp_validate_and_get( + struct platform_device *pdev, + struct msm_ext_disp_codec_id *codec, + enum msm_ext_disp_cable_state state) +{ + struct msm_ext_disp_data *ext_disp_data; + struct msm_ext_disp *ext_disp; + + if (!pdev) { + pr_err("invalid platform device\n"); + goto err; + } + + if (!codec || + codec->type >= EXT_DISPLAY_TYPE_MAX || + codec->ctrl_id != 0 || + codec->stream_id >= MSM_EXT_DISP_MAX_CODECS) { + pr_err("invalid display codec id\n"); + goto err; + } + + if (state < EXT_DISPLAY_CABLE_DISCONNECT || + state >= EXT_DISPLAY_CABLE_STATE_MAX) { + pr_err("invalid HPD state (%d)\n", state); + goto err; + } + + ext_disp_data = platform_get_drvdata(pdev); + if (!ext_disp_data) { + pr_err("invalid drvdata\n"); + goto err; + } + + ext_disp = container_of(ext_disp_data, + struct msm_ext_disp, ext_disp_data); + + return ext_disp; +err: + return ERR_PTR(-EINVAL); +} + +static int msm_ext_disp_update_audio_ops(struct msm_ext_disp *ext_disp, + struct msm_ext_disp_codec_id *codec) +{ + int ret = 0; + struct msm_ext_disp_init_data *data = NULL; + + ret = msm_ext_disp_get_intf_data(ext_disp, codec, &data); + if (ret || !data) { + pr_err("Display not found (%s) ctld (%d) stream (%d)\n", + msm_ext_disp_name(codec->type), + codec->ctrl_id, codec->stream_id); + goto end; + } + + if (ext_disp->ops) { + *ext_disp->ops = data->codec_ops; + ext_disp->current_codec = *codec; + + /* update pdev for interface to use */ + ext_disp->ext_disp_data.intf_pdev = data->pdev; + ext_disp->ext_disp_data.intf_data = data->intf_data; + } + +end: + return ret; +} + +static int msm_ext_disp_audio_config(struct platform_device *pdev, + struct msm_ext_disp_codec_id *codec, + enum msm_ext_disp_cable_state state) +{ + int ret = 0; + struct msm_ext_disp *ext_disp; + + ext_disp = msm_ext_disp_validate_and_get(pdev, codec, state); + if (IS_ERR(ext_disp)) { + ret = PTR_ERR(ext_disp); + goto end; + } + + if (state == EXT_DISPLAY_CABLE_CONNECT) { + ret = msm_ext_disp_select_audio_codec(pdev, codec); + } else { + mutex_lock(&ext_disp->lock); + if (ext_disp->ops) + memset(ext_disp->ops, 0, sizeof(*ext_disp->ops)); + + pr_debug("codec ops cleared for %s\n", 
+ msm_ext_disp_name(ext_disp->current_codec.type)); + + ext_disp->current_codec.type = EXT_DISPLAY_TYPE_MAX; + mutex_unlock(&ext_disp->lock); + } +end: + return ret; +} + +static int msm_ext_disp_audio_notify(struct platform_device *pdev, + struct msm_ext_disp_codec_id *codec, + enum msm_ext_disp_cable_state state) +{ + int ret = 0; + struct msm_ext_disp *ext_disp; + + ext_disp = msm_ext_disp_validate_and_get(pdev, codec, state); + if (IS_ERR(ext_disp)) { + ret = PTR_ERR(ext_disp); + goto end; + } + + mutex_lock(&ext_disp->lock); + ret = msm_ext_disp_process_audio(ext_disp, codec, state); + mutex_unlock(&ext_disp->lock); +end: + return ret; +} + +static void msm_ext_disp_ready_for_display(struct msm_ext_disp *ext_disp) +{ + int ret; + struct msm_ext_disp_init_data *data = NULL; + + if (!ext_disp) { + pr_err("invalid input\n"); + return; + } + + ret = msm_ext_disp_get_intf_data(ext_disp, + &ext_disp->current_codec, &data); + if (ret) { + pr_err("%s not found\n", + msm_ext_disp_name(ext_disp->current_codec.type)); + return; + } + + *ext_disp->ops = data->codec_ops; + data->codec_ops.ready(ext_disp->pdev); +} + +int msm_hdmi_register_audio_codec(struct platform_device *pdev, + struct msm_ext_disp_audio_codec_ops *ops) +{ + return msm_ext_disp_register_audio_codec(pdev, ops); +} + +/** + * Register audio codec ops to display driver + * for HDMI/Display Port usecase support. + * + * @return 0 on success, negative value on error + * + */ +int msm_ext_disp_register_audio_codec(struct platform_device *pdev, + struct msm_ext_disp_audio_codec_ops *ops) +{ + int ret = 0; + struct msm_ext_disp *ext_disp = NULL; + struct msm_ext_disp_data *ext_disp_data = NULL; + + if (!pdev || !ops) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + ext_disp_data = platform_get_drvdata(pdev); + if (!ext_disp_data) { + pr_err("Invalid drvdata\n"); + return -EINVAL; + } + + ext_disp = container_of(ext_disp_data, struct msm_ext_disp, + ext_disp_data); + + mutex_lock(&ext_disp->lock); + + if (ext_disp->ops) { + pr_err("Codec already registered\n"); + ret = -EINVAL; + goto end; + } + + ext_disp->ops = ops; + + pr_debug("audio codec registered\n"); + + if (ext_disp->update_audio) { + ext_disp->update_audio = false; + msm_ext_disp_update_audio_ops(ext_disp, &ext_disp->current_codec); + msm_ext_disp_process_audio(ext_disp, &ext_disp->current_codec, + EXT_DISPLAY_CABLE_CONNECT); + } + +end: + mutex_unlock(&ext_disp->lock); + if (ext_disp->current_codec.type != EXT_DISPLAY_TYPE_MAX) + msm_ext_disp_ready_for_display(ext_disp); + + return ret; +} +EXPORT_SYMBOL_GPL(msm_ext_disp_register_audio_codec); + +int msm_ext_disp_select_audio_codec(struct platform_device *pdev, + struct msm_ext_disp_codec_id *codec) +{ + int ret = 0; + struct msm_ext_disp *ext_disp = NULL; + struct msm_ext_disp_data *ext_disp_data = NULL; + + if (!pdev || !codec) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + ext_disp_data = platform_get_drvdata(pdev); + if (!ext_disp_data) { + pr_err("Invalid drvdata\n"); + return -EINVAL; + } + + ext_disp = container_of(ext_disp_data, struct msm_ext_disp, + ext_disp_data); + + mutex_lock(&ext_disp->lock); + + if (!ext_disp->ops) { + pr_warn("Codec is not registered\n"); + ext_disp->update_audio = true; + ext_disp->current_codec = *codec; + ret = -EINVAL; + goto end; + } + + ret = msm_ext_disp_update_audio_ops(ext_disp, codec); + +end: + mutex_unlock(&ext_disp->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(msm_ext_disp_select_audio_codec); + +static int msm_ext_disp_validate_intf(struct 
msm_ext_disp_init_data *init_data) +{ + struct msm_ext_disp_audio_codec_ops *ops; + + if (!init_data) { + pr_err("Invalid init_data\n"); + return -EINVAL; + } + + if (!init_data->pdev) { + pr_err("Invalid display intf pdev\n"); + return -EINVAL; + } + + if (init_data->codec.type >= EXT_DISPLAY_TYPE_MAX || + init_data->codec.ctrl_id != 0 || + init_data->codec.stream_id >= MSM_EXT_DISP_MAX_CODECS) { + pr_err("Invalid codec info type(%d), ctrl(%d) stream(%d)\n", + init_data->codec.type, + init_data->codec.ctrl_id, + init_data->codec.stream_id); + return -EINVAL; + } + + ops = &init_data->codec_ops; + + if (!ops->audio_info_setup || !ops->get_audio_edid_blk || + !ops->cable_status || !ops->get_intf_id || + !ops->teardown_done || !ops->acknowledge || + !ops->ready) { + pr_err("Invalid codec operation pointers\n"); + return -EINVAL; + } + + return 0; +} + +int msm_ext_disp_register_intf(struct platform_device *pdev, + struct msm_ext_disp_init_data *init_data) +{ + int ret = 0; + struct msm_ext_disp_init_data *data = NULL; + struct msm_ext_disp *ext_disp = NULL; + struct msm_ext_disp_data *ext_disp_data = NULL; + + if (!pdev || !init_data) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + ext_disp_data = platform_get_drvdata(pdev); + if (!ext_disp_data) { + pr_err("Invalid drvdata\n"); + return -EINVAL; + } + + ext_disp = container_of(ext_disp_data, struct msm_ext_disp, + ext_disp_data); + + mutex_lock(&ext_disp->lock); + + ret = msm_ext_disp_validate_intf(init_data); + if (ret) + goto end; + + ret = msm_ext_disp_get_intf_data(ext_disp, &init_data->codec, &data); + if (!ret) { + pr_err("%s already registered. ctrl(%d) stream(%d)\n", + msm_ext_disp_name(init_data->codec.type), + init_data->codec.ctrl_id, + init_data->codec.stream_id); + goto end; + } + + ret = msm_ext_disp_add_intf_data(ext_disp, init_data); + if (ret) + goto end; + + init_data->intf_ops.audio_config = msm_ext_disp_audio_config; + init_data->intf_ops.audio_notify = msm_ext_disp_audio_notify; + + pr_debug("%s registered. 
ctrl(%d) stream(%d)\n", + msm_ext_disp_name(init_data->codec.type), + init_data->codec.ctrl_id, + init_data->codec.stream_id); +end: + mutex_unlock(&ext_disp->lock); + return ret; +} +EXPORT_SYMBOL_GPL(msm_ext_disp_register_intf); + +int msm_ext_disp_deregister_intf(struct platform_device *pdev, + struct msm_ext_disp_init_data *init_data) +{ + int ret = 0; + struct msm_ext_disp *ext_disp = NULL; + struct msm_ext_disp_data *ext_disp_data = NULL; + + if (!pdev || !init_data) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + ext_disp_data = platform_get_drvdata(pdev); + if (!ext_disp_data) { + pr_err("Invalid drvdata\n"); + return -EINVAL; + } + + ext_disp = container_of(ext_disp_data, struct msm_ext_disp, + ext_disp_data); + + mutex_lock(&ext_disp->lock); + + ret = msm_ext_disp_remove_intf_data(ext_disp, init_data); + if (ret) + goto end; + + init_data->intf_ops.audio_config = NULL; + init_data->intf_ops.audio_notify = NULL; + + pr_debug("%s deregistered\n", + msm_ext_disp_name(init_data->codec.type)); +end: + mutex_unlock(&ext_disp->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(msm_ext_disp_deregister_intf); + +static int msm_ext_disp_probe(struct platform_device *pdev) +{ + int ret = 0, id; + struct device_node *of_node = NULL; + struct msm_ext_disp *ext_disp = NULL; + + if (!pdev) { + pr_err("No platform device found\n"); + ret = -ENODEV; + goto end; + } + + of_node = pdev->dev.of_node; + if (!of_node) { + pr_err("No device node found\n"); + ret = -ENODEV; + goto end; + } + + ext_disp = devm_kzalloc(&pdev->dev, sizeof(*ext_disp), GFP_KERNEL); + if (!ext_disp) { + ret = -ENOMEM; + goto end; + } + + platform_set_drvdata(pdev, &ext_disp->ext_disp_data); + ext_disp->pdev = pdev; + + for (id = 0; id < MSM_EXT_DISP_MAX_CODECS; id++) { + ret = msm_ext_disp_extcon_register(ext_disp, id); + if (ret) + goto child_node_failure; + } + + ret = of_platform_populate(of_node, NULL, NULL, &pdev->dev); + if (ret) { + pr_err("Failed to add child devices. 
Error = %d\n", ret); + goto child_node_failure; + } else { + pr_debug("%s: Added child devices.\n", __func__); + } + + mutex_init(&ext_disp->lock); + + INIT_LIST_HEAD(&ext_disp->display_list); + ext_disp->current_codec.type = EXT_DISPLAY_TYPE_MAX; + ext_disp->update_audio = false; + + return ret; + +child_node_failure: + for (id = 0; id < MSM_EXT_DISP_MAX_CODECS; id++) + msm_ext_disp_extcon_unregister(ext_disp, id); + + devm_kfree(&ext_disp->pdev->dev, ext_disp); +end: + return ret; +} + +static int msm_ext_disp_remove(struct platform_device *pdev) +{ + int ret = 0, id; + struct msm_ext_disp *ext_disp = NULL; + struct msm_ext_disp_data *ext_disp_data = NULL; + + if (!pdev) { + pr_err("No platform device\n"); + ret = -ENODEV; + goto end; + } + + ext_disp_data = platform_get_drvdata(pdev); + if (!ext_disp_data) { + pr_err("No drvdata found\n"); + ret = -ENODEV; + goto end; + } + + ext_disp = container_of(ext_disp_data, struct msm_ext_disp, + ext_disp_data); + + for (id = 0; id < MSM_EXT_DISP_MAX_CODECS; id++) + msm_ext_disp_extcon_unregister(ext_disp, id); + + mutex_destroy(&ext_disp->lock); + devm_kfree(&ext_disp->pdev->dev, ext_disp); + +end: + return ret; +} + +static const struct of_device_id msm_ext_dt_match[] = { + {.compatible = "qcom,msm-ext-disp",}, + { /* Sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, msm_ext_dt_match); + +static struct platform_driver this_driver = { + .probe = msm_ext_disp_probe, + .remove = msm_ext_disp_remove, + .driver = { + .name = "msm-ext-disp", + .of_match_table = msm_ext_dt_match, + }, +}; + +static int __init msm_ext_disp_init(void) +{ + int ret = 0; + + ret = platform_driver_register(&this_driver); + if (ret) + pr_err("failed, ret = %d\n", ret); + + return ret; +} + +subsys_initcall(msm_ext_disp_init); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MSM External Display"); diff --git a/qcom/opensource/mm-drivers/sync_fence/Android.mk b/qcom/opensource/mm-drivers/sync_fence/Android.mk new file mode 100644 index 0000000000..f041c70ef4 --- /dev/null +++ b/qcom/opensource/mm-drivers/sync_fence/Android.mk @@ -0,0 +1,42 @@ +LOCAL_PATH := $(call my-dir) +LOCAL_MODULE_DDK_BUILD := true +include $(CLEAR_VARS) + +# This makefile is only for DLKM +ifneq ($(findstring vendor,$(LOCAL_PATH)),) + +ifneq ($(findstring opensource,$(LOCAL_PATH)),) + SYNC_FENCE_BLD_DIR := $(TOP)/vendor/qcom/opensource/mm-drivers/sync_fence +endif # opensource + +DLKM_DIR := $(TOP)/device/qcom/common/dlkm + +LOCAL_ADDITIONAL_DEPENDENCIES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) + +########################################################### +# This is set once per LOCAL_PATH, not per (kernel) module +KBUILD_OPTIONS := SYNC_FENCE_ROOT=$(SYNC_FENCE_BLD_DIR) +KBUILD_OPTIONS += MODNAME=sync_fence +KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM) + +########################################################### +include $(CLEAR_VARS) +LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) +LOCAL_MODULE := sync-fence-module-symvers +LOCAL_MODULE_STEM := Module.symvers +LOCAL_MODULE_KBUILD_NAME := Module.symvers +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) + +include $(DLKM_DIR)/Build_external_kernelmodule.mk +########################################################### +include $(CLEAR_VARS) +LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) +LOCAL_MODULE := sync_fence.ko +LOCAL_MODULE_KBUILD_NAME := sync_fence.ko +LOCAL_MODULE_TAGS := optional +LOCAL_MODULE_DEBUG_ENABLE := true +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) 
+ +include $(DLKM_DIR)/Build_external_kernelmodule.mk +########################################################### +endif # DLKM check diff --git a/qcom/opensource/mm-drivers/sync_fence/BUILD.bazel b/qcom/opensource/mm-drivers/sync_fence/BUILD.bazel new file mode 100644 index 0000000000..ac0a95d918 --- /dev/null +++ b/qcom/opensource/mm-drivers/sync_fence/BUILD.bazel @@ -0,0 +1,22 @@ +load("//build/kernel/kleaf:kernel.bzl", "ddk_headers") +load(":define_sync_fence.bzl", "define_sync_fence") + +package( + default_visibility = [ + "//visibility:public" + ], +) + +ddk_headers( + name = "sync_fence_uapi_headers", + hdrs = glob(["include/uapi/sync_fence/*.h"]), + includes = ["include"] +) + +ddk_headers( + name = "sync_fence_headers", + hdrs = glob(["include/*.h"]), + includes = ["include"] +) + +define_sync_fence() diff --git a/qcom/opensource/mm-drivers/sync_fence/Kbuild b/qcom/opensource/mm-drivers/sync_fence/Kbuild new file mode 100644 index 0000000000..b1f9db20d7 --- /dev/null +++ b/qcom/opensource/mm-drivers/sync_fence/Kbuild @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0-only + +KDIR := $(TOP)/kernel_platform/msm-kernel +LINUXINCLUDE += -I$(SYNC_FENCE_ROOT)sync_fence/include/ +include $(SYNC_FENCE_ROOT)/config/kalamammdrivers.conf +LINUXINCLUDE += -include $(SYNC_FENCE_ROOT)/config/kalamammdriversconf.h + +ifdef CONFIG_QCOM_SPEC_SYNC +obj-m += sync_fence.o + +sync_fence-y := src/qcom_sync_file.o + +CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" +endif +EXTRA_CFLAGS += -Wformat-extra-args -Wstrict-prototypes -Wformat-insufficient-args \ + -Wformat-invalid-specifier -Wformat-zero-length -Wnonnull diff --git a/qcom/opensource/mm-drivers/sync_fence/Kconfig b/qcom/opensource/mm-drivers/sync_fence/Kconfig new file mode 100644 index 0000000000..6422d5cafa --- /dev/null +++ b/qcom/opensource/mm-drivers/sync_fence/Kconfig @@ -0,0 +1,4 @@ +config QCOM_SPEC_SYNC + bool "Enable spec fence" + help + Enable sync_fence driver \ No newline at end of file diff --git a/qcom/opensource/mm-drivers/sync_fence/Makefile b/qcom/opensource/mm-drivers/sync_fence/Makefile new file mode 100644 index 0000000000..ecd6ef1771 --- /dev/null +++ b/qcom/opensource/mm-drivers/sync_fence/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 + +KBUILD_OPTIONS += SYNC_FENCE_ROOT=$(KERNEL_SRC)/$(M)/../ + +all: modules + +modules_install: + $(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install + +%: + $(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS) + +clean: + rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers + rm -rf .tmp_versions diff --git a/qcom/opensource/mm-drivers/sync_fence/defconfig b/qcom/opensource/mm-drivers/sync_fence/defconfig new file mode 100644 index 0000000000..33c414d0f9 --- /dev/null +++ b/qcom/opensource/mm-drivers/sync_fence/defconfig @@ -0,0 +1 @@ +CONFIG_QCOM_SPEC_SYNC=y diff --git a/qcom/opensource/mm-drivers/sync_fence/define_sync_fence.bzl b/qcom/opensource/mm-drivers/sync_fence/define_sync_fence.bzl new file mode 100644 index 0000000000..b7dcf21700 --- /dev/null +++ b/qcom/opensource/mm-drivers/sync_fence/define_sync_fence.bzl @@ -0,0 +1,33 @@ +load("//build/kernel/kleaf:kernel.bzl", "ddk_module") +load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir") +load("//msm-kernel:target_variants.bzl", "get_all_variants") + +def _define_module(target, variant): + tv = "{}_{}".format(target, variant) + ddk_module( + name = "{}_sync_fence".format(tv), + srcs = ["src/qcom_sync_file.c"], + out = "sync_fence.ko", + kconfig = "Kconfig", 
+ defconfig = "defconfig", + deps = [ + "//msm-kernel:all_headers", + "//vendor/qcom/opensource/mm-drivers:mm_drivers_headers", + ], + kernel_build = "//msm-kernel:{}".format(tv), + ) + + copy_to_dist_dir( + name = "{}_sync_fence_dist".format(tv), + data = [":{}_sync_fence".format(tv)], + dist_dir = "out/target/product/{}/dlkm/lib/modules".format(target), + flat = True, + wipe_dist_dir = False, + allow_duplicate_filenames = False, + mode_overrides = {"**/*": "644"}, + log = "info", + ) + +def define_sync_fence(): + for (t, v) in get_all_variants(): + _define_module(t, v) diff --git a/qcom/opensource/mm-drivers/sync_fence/include/qcom_sync_file.h b/qcom/opensource/mm-drivers/sync_fence/include/qcom_sync_file.h new file mode 100644 index 0000000000..5e6e541865 --- /dev/null +++ b/qcom/opensource/mm-drivers/sync_fence/include/qcom_sync_file.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _LINUX_QCOM_SPEC_SYNC_H +#define _LINUX_QCOM_SPEC_SYNC_H + +#include + +#define SPEC_FENCE_FLAG_FENCE_ARRAY 16 /* fence-array is speculative */ +#define SPEC_FENCE_FLAG_FENCE_ARRAY_BOUND 17 /* fence-array is bound */ + +#if IS_ENABLED(CONFIG_QCOM_SPEC_SYNC) + +/** + * spec_sync_wait_bind_array() - Waits until the fence-array passed as parameter is bound. + * @fence_array: fence-array to wait-on until it is populated. + * @timeout_ms: timeout to wait. + * + * This function will wait until the fence-array passed as paremeter is bound; i.e. all the + * dma-fences that conform the fence-array are populated by the spec-fence driver bind ioctl. + * Once this function returns success, all the fences in the array should be valid. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int spec_sync_wait_bind_array(struct dma_fence_array *fence_array, u32 timeout_ms); + +#else + +static inline int spec_sync_wait_bind_array(struct dma_fence_array *fence_array, u32 timeout_ms) +{ + return -EINVAL; +} + +#endif /* CONFIG_QCOM_SPEC_SYNC */ + +#endif /* _LINUX_QCOM_SPEC_SYNC_H */ diff --git a/qcom/opensource/mm-drivers/sync_fence/include/uapi/Kbuild b/qcom/opensource/mm-drivers/sync_fence/include/uapi/Kbuild new file mode 100644 index 0000000000..f662bb6426 --- /dev/null +++ b/qcom/opensource/mm-drivers/sync_fence/include/uapi/Kbuild @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note + +# Top-level Makefile calls into asm-$(ARCH) +# List only non-arch directories below + +header-y += sync_fence/ diff --git a/qcom/opensource/mm-drivers/sync_fence/include/uapi/sync_fence/qcom_sync_file.h b/qcom/opensource/mm-drivers/sync_fence/include/uapi/sync_fence/qcom_sync_file.h new file mode 100644 index 0000000000..964e0f46f7 --- /dev/null +++ b/qcom/opensource/mm-drivers/sync_fence/include/uapi/sync_fence/qcom_sync_file.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _UAPI_LINUX_SPEC_SYNC_H +#define _UAPI_LINUX_SPEC_SYNC_H + +#include +#include + +#define SPEC_FENCE_SIGNAL_ANY 0x1 +#define SPEC_FENCE_SIGNAL_ALL 0x2 + +/** + * struct fence_bind_data - data passed to bind ioctl + * @out_bind_fd: file descriptor of second fence + * @fds: file descriptor list of child fences + */ +struct fence_bind_data { + __u32 out_bind_fd; + __u64 fds; +}; + +/** + * struct fence_create_data - detailed fence information + * @num_fences: Total fences that array needs to carry. + * @flags: Flags specifying on how to signal the array + * @out_bind_fd: Returns the fence fd. + */ +struct fence_create_data { + __u32 num_fences; + __u32 flags; + __u32 out_bind_fd; +}; + +#define SPEC_SYNC_MAGIC '>' + +/** + * DOC: SPEC_SYNC_IOC_BIND - bind two fences + * + * Takes a struct fence_bind_data. binds the child fds with the fence array + * pointed by fd1. + */ +#define SPEC_SYNC_IOC_BIND _IOWR(SPEC_SYNC_MAGIC, 3, struct fence_bind_data) + +/** + * DOC: SPEC_SYNC_IOC_CREATE_FENCE - Create a fence array + * + * Takes a struct fence_create_data. If num_fences is > 0, fence array will be + * created and returns the array fd in fence_create_data.fd1 + */ +#define SPEC_SYNC_IOC_CREATE_FENCE _IOWR(SPEC_SYNC_MAGIC, 4, struct fence_create_data) + +/** + * DOC: SPEC_SYNC_IOC_GET_VER - Get Spec driver version + * + * Returns Spec driver version. + */ +#define SPEC_SYNC_IOC_GET_VER _IOWR(SPEC_SYNC_MAGIC, 5, __u64) + +#endif /* _UAPI_LINUX_SPEC_SYNC_H */ diff --git a/qcom/opensource/mm-drivers/sync_fence/src/qcom_sync_file.c b/qcom/opensource/mm-drivers/sync_fence/src/qcom_sync_file.c new file mode 100644 index 0000000000..0bb94a97d3 --- /dev/null +++ b/qcom/opensource/mm-drivers/sync_fence/src/qcom_sync_file.c @@ -0,0 +1,596 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CLASS_NAME "sync" +#define DRV_NAME "spec_sync" +#define DRV_VERSION 1 +#define NAME_LEN 32 + +#define FENCE_MIN 1 +#define FENCE_MAX 32 + +#if IS_ENABLED(CONFIG_DEBUG_FS) + #define MAX_DEVICE_SUPPORTED 2 +#else + #define MAX_DEVICE_SUPPORTED 1 +#endif + +#define DUMMY_CONTEXT 0xfafadadafafadada +#define DUMMY_SEQNO 0xefa9ce00efa9ce00 + +struct dummy_spec_fence { + struct dma_fence fence; + spinlock_t lock; +}; + +struct sync_device { + /* device info */ + struct class *dev_class; + dev_t dev_num; + struct device *dev; + struct cdev *cdev; + struct mutex lock; + struct dummy_spec_fence *dummy_fence; + + /* device drv data */ + atomic_t device_available; + char name[NAME_LEN]; + uint32_t version; + struct mutex l_lock; + struct list_head fence_array_list; + wait_queue_head_t wait_queue; +}; + +struct fence_array_node { + struct dma_fence_array *fence_array; + struct list_head list; +}; + +/* Speculative Sync Device Driver State */ +static struct sync_device sync_dev; + +static const char *spec_fence_get_name_dummy(struct dma_fence *fence) +{ + return "dummy_fence"; +} + +static const struct dma_fence_ops dummy_spec_fence_ops = { + .get_driver_name = spec_fence_get_name_dummy, + .get_timeline_name = spec_fence_get_name_dummy, +}; + +static bool sanitize_fence_array(struct dma_fence_array *fence) +{ + struct fence_array_node *node; + int ret = false; + + mutex_lock(&sync_dev.l_lock); + list_for_each_entry(node, &sync_dev.fence_array_list, list) { + if (node->fence_array == fence) { + ret = true; + break; + } + } + mutex_unlock(&sync_dev.l_lock); + + return ret; +} + +static void clear_fence_array_tracker(bool force_clear) +{ + struct fence_array_node *node, *temp; + struct dma_fence_array *array; + struct dma_fence *fence; + bool is_signaled; + + mutex_lock(&sync_dev.l_lock); + list_for_each_entry_safe(node, temp, &sync_dev.fence_array_list, list) { + array = node->fence_array; + fence = &array->base; + is_signaled = dma_fence_is_signaled(fence); + + if (force_clear && !array->fences) + array->num_fences = 0; + + pr_debug("force_clear:%d is_signaled:%d pending:%d\n", force_clear, is_signaled, + atomic_read(&array->num_pending)); + + if (force_clear && !is_signaled && atomic_dec_and_test(&array->num_pending)) + dma_fence_signal(fence); + + if (force_clear || is_signaled) { + dma_fence_put(fence); + list_del(&node->list); + kfree(node); + } + } + mutex_unlock(&sync_dev.l_lock); +} + +static struct sync_device *spec_fence_init_locked(struct sync_device *obj, const char *name) +{ + if (atomic_read(&obj->device_available) >= MAX_DEVICE_SUPPORTED) { + pr_err("number of device fds are limited to %d, device opened:%d\n", + MAX_DEVICE_SUPPORTED, atomic_read(&obj->device_available)); + return NULL; + } else if (!atomic_read(&obj->device_available)) { + memset(obj->name, 0, NAME_LEN); + strscpy(obj->name, name, sizeof(obj->name)); + } + + atomic_inc(&obj->device_available); + + return obj; +} + +static int spec_sync_open(struct inode *inode, struct file *file) +{ + char task_comm[TASK_COMM_LEN]; + struct sync_device *obj = &sync_dev; + int ret = 0; + + if (!inode || !inode->i_cdev || !file) { + pr_err("NULL pointer passed\n"); + return -EINVAL; + } + + mutex_lock(&sync_dev.lock); + + get_task_comm(task_comm, current); + + obj = spec_fence_init_locked(obj, task_comm); + if (!obj) { + 
pr_err("Spec device exists owner:%s caller:%s\n", sync_dev.name, task_comm); + ret = -EEXIST; + goto end; + } + + file->private_data = obj; + +end: + mutex_unlock(&sync_dev.lock); + return ret; +} + +static int spec_sync_release(struct inode *inode, struct file *file) +{ + int ret = 0; + struct sync_device *obj = file->private_data; + + mutex_lock(&sync_dev.lock); + + if (!atomic_read(&obj->device_available)) { + pr_err("no device to release!!\n"); + ret = -ENODEV; + goto end; + } + + atomic_dec(&obj->device_available); + + if (!atomic_read(&obj->device_available)) + clear_fence_array_tracker(true); + +end: + mutex_unlock(&sync_dev.lock); + return ret; +} + +static int spec_sync_ioctl_get_ver(struct sync_device *obj, unsigned long __user arg) +{ + uint32_t version = obj->version; + + if (copy_to_user((void __user *)arg, &version, sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int spec_sync_create_array(struct fence_create_data *f) +{ + int fd = get_unused_fd_flags(O_CLOEXEC); + struct sync_file *sync_file; + struct dma_fence_array *fence_array; + struct fence_array_node *node; + struct dma_fence **fences; + struct dummy_spec_fence *dummy_fence_p = sync_dev.dummy_fence; + bool signal_any; + int i, ret = 0; + + if (fd < 0) { + pr_err("failed to get_unused_fd_flags\n"); + return fd; + } + + if (f->num_fences < FENCE_MIN || f->num_fences > FENCE_MAX) { + pr_err("invalid arguments num_fences:%d\n", f->num_fences); + ret = -ERANGE; + goto error_args; + } + + fences = kmalloc_array(f->num_fences, sizeof(void *), GFP_KERNEL|__GFP_ZERO); + if (!fences) { + ret = -ENOMEM; + goto error_args; + } + + for (i = 0; i < f->num_fences; i++) { + fences[i] = &dummy_fence_p->fence; + /* + * Increase dummy-fences refcount here, we must do this since any call to + * fence-array release while dummy-fences are the children of the fence-array + * will decrement the dummy_fence refcount. Therefore, to prevent the release + * of the dummy_fence fences, we must keep an extra refcount for every time that + * the fence-array->release can decrement its children's refcount. the extra + * refcount will be decreased impilictly when dma_fence_put(&fence_array->base) + * called. + */ + dma_fence_get(&dummy_fence_p->fence); + } + + signal_any = f->flags & SPEC_FENCE_SIGNAL_ALL ? 
false : true; + + fence_array = dma_fence_array_create(f->num_fences, fences, + dma_fence_context_alloc(1), 0, signal_any); + if (!fence_array) { + /* fence-array create failed, remove extra refcounts */ + for (i = 0; i < f->num_fences; i++) + dma_fence_put(&dummy_fence_p->fence); + + kfree(fences); + ret = -EINVAL; + goto error_args; + } + + /* Set the enable signal such that signalling is not done during wait*/ + set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence_array->base.flags); + set_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &fence_array->base.flags); + + sync_file = sync_file_create(&fence_array->base); + if (!sync_file) { + pr_err("sync_file_create fail\n"); + ret = -EINVAL; + goto err; + } + node = kzalloc((sizeof(struct fence_array_node)), GFP_KERNEL); + if (!node) { + fput(sync_file->file); + ret = -ENOMEM; + goto err; + } + + fd_install(fd, sync_file->file); + node->fence_array = fence_array; + + mutex_lock(&sync_dev.l_lock); + list_add_tail(&node->list, &sync_dev.fence_array_list); + mutex_unlock(&sync_dev.l_lock); + + pr_debug("spec fd:%d num_fences:%u\n", fd, f->num_fences); + return fd; + +err: + dma_fence_put(&fence_array->base); +error_args: + put_unused_fd(fd); + return ret; +} + +static int spec_sync_ioctl_create_fence(struct sync_device *obj, unsigned long __user arg) +{ + struct fence_create_data f; + int fd; + + if (copy_from_user(&f, (void __user *)arg, sizeof(f))) + return -EFAULT; + + fd = spec_sync_create_array(&f); + if (fd < 0) + return fd; + + f.out_bind_fd = fd; + + if (copy_to_user((void __user *)arg, &f, sizeof(f))) + return -EFAULT; + + return 0; +} + +int spec_sync_wait_bind_array(struct dma_fence_array *fence_array, u32 timeout_ms) +{ + int ret; + + /* Check if fence-array is a speculative fence */ + if (!fence_array || !test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &fence_array->base.flags)) { + pr_err("invalid fence!\n"); + return -EINVAL; + } else if (test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY_BOUND, &fence_array->base.flags)) { + /* This fence-array is already bound, just return success */ + return 0; + } + + /* Wait for the fence-array bind */ + ret = wait_event_timeout(sync_dev.wait_queue, + test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY_BOUND, &fence_array->base.flags), + msecs_to_jiffies(timeout_ms)); + if (!ret) { + pr_err("timed out waiting for bind fence-array %d\n", timeout_ms); + ret = -ETIMEDOUT; + } else { + ret = 0; + } + + return ret; +} +EXPORT_SYMBOL_GPL(spec_sync_wait_bind_array); + +static int spec_sync_bind_array(struct fence_bind_data *sync_bind_info) +{ + struct dma_fence_array *fence_array; + struct dma_fence *fence = NULL; + struct dma_fence *user_fence = NULL; + int *user_fds, ret = 0, i; + u32 num_fences; + + fence = sync_file_get_fence(sync_bind_info->out_bind_fd); + if (!fence) { + pr_err("dma fence failure out_fd:%d\n", sync_bind_info->out_bind_fd); + return -EINVAL; + } + + if (dma_fence_is_signaled(fence)) { + pr_err("spec fence is already signaled, out_fd:%d\n", + sync_bind_info->out_bind_fd); + ret = -EINVAL; + goto end; + } + + fence_array = container_of(fence, struct dma_fence_array, base); + if (!sanitize_fence_array(fence_array)) { + pr_err("spec fence not found in the registered list out_fd:%d\n", + sync_bind_info->out_bind_fd); + ret = -EINVAL; + goto end; + } + + num_fences = fence_array->num_fences; + + for (i = 0; i < num_fences; i++) { + if (!(fence_array->fences[i]->context == DUMMY_CONTEXT && + fence_array->fences[i]->seqno == DUMMY_SEQNO)) { + pr_err("fence array already populated, spec fd:%d status:%d flags:0x%lx\n", + 
sync_bind_info->out_bind_fd, dma_fence_get_status(fence), + fence->flags); + ret = -EINVAL; + goto end; + } + } + + user_fds = kzalloc(num_fences * (sizeof(int)), GFP_KERNEL); + if (!user_fds) { + ret = -ENOMEM; + goto end; + } + + if (copy_from_user(user_fds, (void __user *)sync_bind_info->fds, + num_fences * sizeof(int))) { + ret = -EFAULT; + goto out; + } + + spin_lock(fence->lock); + for (i = 0; i < num_fences; i++) { + user_fence = sync_file_get_fence(user_fds[i]); + if (!user_fence) { + pr_warn("bind fences are invalid !! user_fd:%d out_bind_fd:%d\n", + user_fds[i], sync_bind_info->out_bind_fd); + ret = -EINVAL; + goto bind_invalid; + } else if (user_fence->context == fence_array->base.context && + user_fence->seqno == fence_array->base.seqno) { + pr_err("invalid spec fence, ufd:%d o_b_fd:%d ctx:%lld seqno:%lld\n", + user_fds[i], sync_bind_info->out_bind_fd, + user_fence->context, user_fence->seqno); + ret = -EINVAL; + goto bind_invalid; + } + fence_array->fences[i] = user_fence; + /* + * At this point the fence-array fully contains valid fences and no more the + * dummy-fence, therefore, we must release the extra refcount that the + * creation of the speculative fence added to the dummy-fence. + */ + dma_fence_put(&sync_dev.dummy_fence->fence); + pr_debug("spec fd:%d i:%d bind fd:%d error:%d\n", sync_bind_info->out_bind_fd, + i, user_fds[i], fence_array->fences[i]->error); + } + + clear_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags); + spin_unlock(fence->lock); + dma_fence_enable_sw_signaling(&fence_array->base); + + clear_fence_array_tracker(false); + +bind_invalid: + set_bit(SPEC_FENCE_FLAG_FENCE_ARRAY_BOUND, &fence_array->base.flags); + wake_up_all(&sync_dev.wait_queue); + + if (ret) { + dma_fence_set_error(fence, -EINVAL); + spin_unlock(fence->lock); + dma_fence_signal(fence); + clear_fence_array_tracker(false); + } +out: + kfree(user_fds); +end: + dma_fence_put(fence); + return ret; +} + +static int spec_sync_ioctl_bind(struct sync_device *obj, unsigned long __user arg) +{ + struct fence_bind_data sync_bind_info; + + if (copy_from_user(&sync_bind_info, (void __user *)arg, sizeof(struct fence_bind_data))) + return -EFAULT; + + if (sync_bind_info.out_bind_fd < 0) { + pr_err("Invalid out_fd:%d\n", sync_bind_info.out_bind_fd); + return -EINVAL; + } + + return spec_sync_bind_array(&sync_bind_info); +} + +static long spec_sync_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct sync_device *obj = file->private_data; + int ret = 0; + + switch (cmd) { + case SPEC_SYNC_IOC_CREATE_FENCE: + ret = spec_sync_ioctl_create_fence(obj, arg); + break; + case SPEC_SYNC_IOC_BIND: + ret = spec_sync_ioctl_bind(obj, arg); + break; + case SPEC_SYNC_IOC_GET_VER: + ret = spec_sync_ioctl_get_ver(obj, arg); + break; + default: + ret = -ENOTTY; + } + + return ret; +} + +const struct file_operations spec_sync_fops = { + .owner = THIS_MODULE, + .open = spec_sync_open, + .release = spec_sync_release, + .unlocked_ioctl = spec_sync_ioctl, +}; + +static int spec_sync_register_device(void) +{ + struct dummy_spec_fence *dummy_fence_p = NULL; + int ret; + +#if (KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE) + sync_dev.dev_class = class_create(CLASS_NAME); +#else + sync_dev.dev_class = class_create(THIS_MODULE, CLASS_NAME); +#endif + if (sync_dev.dev_class == NULL) { + pr_err("%s: class_create fail.\n", __func__); + goto res_err; + } + + ret = alloc_chrdev_region(&sync_dev.dev_num, 0, 1, DRV_NAME); + if (ret) { + pr_err("%s: alloc_chrdev_region fail.\n", __func__); + goto 
alloc_chrdev_region_err; + } + + sync_dev.dev = device_create(sync_dev.dev_class, NULL, + sync_dev.dev_num, + &sync_dev, DRV_NAME); + if (IS_ERR(sync_dev.dev)) { + pr_err("%s: device_create fail.\n", __func__); + goto device_create_err; + } + + sync_dev.cdev = cdev_alloc(); + if (sync_dev.cdev == NULL) { + pr_err("%s: cdev_alloc fail.\n", __func__); + goto cdev_alloc_err; + } + cdev_init(sync_dev.cdev, &spec_sync_fops); + sync_dev.cdev->owner = THIS_MODULE; + + ret = cdev_add(sync_dev.cdev, sync_dev.dev_num, 1); + if (ret) { + pr_err("%s: cdev_add fail.\n", __func__); + goto cdev_add_err; + } + + sync_dev.version = DRV_VERSION; + mutex_init(&sync_dev.lock); + mutex_init(&sync_dev.l_lock); + INIT_LIST_HEAD(&sync_dev.fence_array_list); + init_waitqueue_head(&sync_dev.wait_queue); + + dummy_fence_p = kzalloc(sizeof(struct dummy_spec_fence), GFP_KERNEL); + if (!dummy_fence_p) { + ret = -ENOMEM; + goto cdev_add_err; + } + + spin_lock_init(&dummy_fence_p->lock); + dma_fence_init(&dummy_fence_p->fence, &dummy_spec_fence_ops, &dummy_fence_p->lock, + DUMMY_CONTEXT, DUMMY_SEQNO); + sync_dev.dummy_fence = dummy_fence_p; + + return 0; + +cdev_add_err: + cdev_del(sync_dev.cdev); +cdev_alloc_err: + device_destroy(sync_dev.dev_class, sync_dev.dev_num); +device_create_err: + unregister_chrdev_region(sync_dev.dev_num, 1); +alloc_chrdev_region_err: + class_destroy(sync_dev.dev_class); +res_err: + return -ENODEV; +} + +static int __init spec_sync_init(void) +{ + int ret = 0; + + ret = spec_sync_register_device(); + if (ret) { + pr_err("%s: speculative sync driver register fail.\n", __func__); + return ret; + } + return ret; +} + +static void __exit spec_sync_deinit(void) +{ + cdev_del(sync_dev.cdev); + device_destroy(sync_dev.dev_class, sync_dev.dev_num); + unregister_chrdev_region(sync_dev.dev_num, 1); + class_destroy(sync_dev.dev_class); + dma_fence_put(&sync_dev.dummy_fence->fence); +} + +module_init(spec_sync_init); +module_exit(spec_sync_deinit); + +MODULE_DESCRIPTION("QCOM Speculative Sync Driver"); +MODULE_LICENSE("GPL v2");
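
A hedged usage sketch (not taken from the patch): how a display interface driver, for example a DP controller, might register with the msm_ext_display core added above. All example_dp_* names are hypothetical, ext_disp_pdev is assumed to be the "qcom,msm-ext-disp" platform device found by the interface driver (typically via a device-tree phandle), and the include path follows the mm_drivers_headers / qti_mm_drivers_kernel_headers exports; a real driver would program hardware in the callbacks instead of returning stub values.

#include <linux/platform_device.h>
#include <linux/types.h>
#include <msm_ext_display.h>	/* assumed include path via mm_drivers_headers */

/* Stub codec_ops callbacks; the core rejects registration if any is NULL. */
static int example_dp_audio_info_setup(struct platform_device *pdev,
		struct msm_ext_disp_audio_setup_params *params) { return 0; }
static int example_dp_get_audio_edid_blk(struct platform_device *pdev,
		struct msm_ext_disp_audio_edid_blk *blk) { return 0; }
static int example_dp_cable_status(struct platform_device *pdev, u32 vote) { return 0; }
static int example_dp_get_intf_id(struct platform_device *pdev) { return EXT_DISPLAY_TYPE_DP; }
static void example_dp_teardown_done(struct platform_device *pdev) { }
static int example_dp_acknowledge(struct platform_device *pdev, u32 ack) { return 0; }
static int example_dp_ready(struct platform_device *pdev) { return 0; }

static struct msm_ext_disp_init_data example_dp_init_data;

static int example_dp_bind_ext_display(struct platform_device *my_pdev,
		struct platform_device *ext_disp_pdev)
{
	int ret;

	example_dp_init_data.codec.type = EXT_DISPLAY_TYPE_DP;
	example_dp_init_data.codec.ctrl_id = 0;		/* must be 0, per the validation in the core */
	example_dp_init_data.codec.stream_id = 0;	/* < MSM_EXT_DISP_MAX_CODECS */
	example_dp_init_data.pdev = my_pdev;
	example_dp_init_data.codec_ops.audio_info_setup = example_dp_audio_info_setup;
	example_dp_init_data.codec_ops.get_audio_edid_blk = example_dp_get_audio_edid_blk;
	example_dp_init_data.codec_ops.cable_status = example_dp_cable_status;
	example_dp_init_data.codec_ops.get_intf_id = example_dp_get_intf_id;
	example_dp_init_data.codec_ops.teardown_done = example_dp_teardown_done;
	example_dp_init_data.codec_ops.acknowledge = example_dp_acknowledge;
	example_dp_init_data.codec_ops.ready = example_dp_ready;

	ret = msm_ext_disp_register_intf(ext_disp_pdev, &example_dp_init_data);
	if (ret)
		return ret;

	/*
	 * On a hotplug-connect event the interface driver would then use the
	 * ops that the core filled in during registration, e.g.:
	 *
	 *   example_dp_init_data.intf_ops.audio_config(ext_disp_pdev,
	 *           &example_dp_init_data.codec, EXT_DISPLAY_CABLE_CONNECT);
	 *   example_dp_init_data.intf_ops.audio_notify(ext_disp_pdev,
	 *           &example_dp_init_data.codec, EXT_DISPLAY_CABLE_CONNECT);
	 */
	return 0;
}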
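
On the audio side, the codec driver hands the core a writable msm_ext_disp_audio_codec_ops structure; once a connected display selects itself (via audio_config / msm_ext_disp_select_audio_codec), the core copies that display's codec_ops into it. A minimal sketch under the same assumptions (hypothetical example_* names, ext_disp_pdev being the "qcom,msm-ext-disp" device):

#include <linux/platform_device.h>
#include <msm_ext_display.h>	/* assumed include path via mm_drivers_headers */

static struct msm_ext_disp_audio_codec_ops example_audio_ops;

static int example_audio_attach(struct platform_device *ext_disp_pdev)
{
	int ret;

	/* The core keeps this pointer and populates it when a display is selected. */
	ret = msm_ext_disp_register_audio_codec(ext_disp_pdev, &example_audio_ops);
	if (ret)
		return ret;

	/*
	 * After selection, the populated ops are called with the same
	 * ext_disp_pdev, for example:
	 *
	 *   example_audio_ops.get_audio_edid_blk(ext_disp_pdev, &blk);
	 *   example_audio_ops.audio_info_setup(ext_disp_pdev, &params);
	 *   example_audio_ops.acknowledge(ext_disp_pdev, AUDIO_ACK_CONNECT);
	 */
	return 0;
}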
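
A kernel consumer that receives a speculative fence (for instance a dma_fence taken from a sync_file fd) can use the spec_sync_wait_bind_array() export to wait until userspace has bound real fences into the array. A hedged sketch; the 100 ms timeout is arbitrary and the flat include path assumes the -I$(SYNC_FENCE_ROOT)sync_fence/include/ setting from the Kbuild above:

#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
#include <qcom_sync_file.h>	/* assumed include path per the sync_fence Kbuild */

static int example_wait_for_spec_fence(struct dma_fence *fence)
{
	/* to_dma_fence_array() returns NULL when the fence is not a fence array. */
	struct dma_fence_array *array = to_dma_fence_array(fence);

	if (!array)
		return 0;	/* ordinary fence, nothing to bind */

	/*
	 * Returns 0 once SPEC_FENCE_FLAG_FENCE_ARRAY_BOUND is set, -ETIMEDOUT on
	 * timeout, or -EINVAL if this is not a speculative fence array.
	 */
	return spec_sync_wait_bind_array(array, 100);
}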
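
From userspace, the spec_sync character device drives the same flow through the UAPI header above: create a fence array backed by placeholder fences, hand its sync_file fd to consumers early, then bind the real fences once they exist. A hedged sketch; the device node name (/dev/spec_sync) depends on udev, dev_fd is an already-open fd for that node which the caller keeps open (the driver force-clears tracked arrays on its last release), and child_fds[] is assumed to hold n sync_file fds obtained from other drivers:

#include <stdint.h>
#include <sys/ioctl.h>
#include "qcom_sync_file.h"	/* the UAPI header installed by qti_generate_mm_drivers_kernel_headers */

static int example_create_then_bind(int dev_fd, int *child_fds, uint32_t n)
{
	struct fence_create_data create = { .num_fences = n, .flags = SPEC_FENCE_SIGNAL_ALL };
	struct fence_bind_data bind = { 0 };
	int spec_fd;

	/* The driver allocates the fence array and returns its sync_file fd in out_bind_fd. */
	if (ioctl(dev_fd, SPEC_SYNC_IOC_CREATE_FENCE, &create) < 0)
		return -1;
	spec_fd = create.out_bind_fd;

	/* spec_fd can already be passed to consumers like any other sync_file fd. */

	/* Later, once the real fences exist: bind exactly n sync_file fds into the array. */
	bind.out_bind_fd = spec_fd;
	bind.fds = (uint64_t)(uintptr_t)child_fds;
	if (ioctl(dev_fd, SPEC_SYNC_IOC_BIND, &bind) < 0)
		return -1;

	return spec_fd;
}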