From f29d4d57e11c09e04f76d789e9b2cd78f69ec5d6 Mon Sep 17 00:00:00 2001 From: Shashank Babu Chinta Venkata Date: Fri, 19 Nov 2021 10:03:05 -0800 Subject: [PATCH 002/166] mm-drivers: sync-fence: add sync fence driver snapshot Add snapshot for syncfence driver in mm-drivers repo. Change-Id: I43556e3479b45399b1ac0e8ba7a423f36bb21cf9 Signed-off-by: Shashank Babu Chinta Venkata Signed-off-by: Jeykumar Sankaran --- sync_fence/include/uapi/Kbuild | 6 + .../include/uapi/sync_fence/qcom_sync_file.h | 63 +++ sync_fence/src/qcom_sync_file.c | 466 ++++++++++++++++++ 3 files changed, 535 insertions(+) create mode 100644 sync_fence/include/uapi/Kbuild create mode 100644 sync_fence/include/uapi/sync_fence/qcom_sync_file.h create mode 100644 sync_fence/src/qcom_sync_file.c diff --git a/sync_fence/include/uapi/Kbuild b/sync_fence/include/uapi/Kbuild new file mode 100644 index 0000000000..f662bb6426 --- /dev/null +++ b/sync_fence/include/uapi/Kbuild @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note + +# Top-level Makefile calls into asm-$(ARCH) +# List only non-arch directories below + +header-y += sync_fence/ diff --git a/sync_fence/include/uapi/sync_fence/qcom_sync_file.h b/sync_fence/include/uapi/sync_fence/qcom_sync_file.h new file mode 100644 index 0000000000..964e0f46f7 --- /dev/null +++ b/sync_fence/include/uapi/sync_fence/qcom_sync_file.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _UAPI_LINUX_SPEC_SYNC_H +#define _UAPI_LINUX_SPEC_SYNC_H + +#include +#include + +#define SPEC_FENCE_SIGNAL_ANY 0x1 +#define SPEC_FENCE_SIGNAL_ALL 0x2 + +/** + * struct fence_bind_data - data passed to bind ioctl + * @out_bind_fd: file descriptor of second fence + * @fds: file descriptor list of child fences + */ +struct fence_bind_data { + __u32 out_bind_fd; + __u64 fds; +}; + +/** + * struct fence_create_data - detailed fence information + * @num_fences: Total fences that array needs to carry. + * @flags: Flags specifying on how to signal the array + * @out_bind_fd: Returns the fence fd. + */ +struct fence_create_data { + __u32 num_fences; + __u32 flags; + __u32 out_bind_fd; +}; + +#define SPEC_SYNC_MAGIC '>' + +/** + * DOC: SPEC_SYNC_IOC_BIND - bind two fences + * + * Takes a struct fence_bind_data. binds the child fds with the fence array + * pointed by fd1. + */ +#define SPEC_SYNC_IOC_BIND _IOWR(SPEC_SYNC_MAGIC, 3, struct fence_bind_data) + +/** + * DOC: SPEC_SYNC_IOC_CREATE_FENCE - Create a fence array + * + * Takes a struct fence_create_data. If num_fences is > 0, fence array will be + * created and returns the array fd in fence_create_data.fd1 + */ +#define SPEC_SYNC_IOC_CREATE_FENCE _IOWR(SPEC_SYNC_MAGIC, 4, struct fence_create_data) + +/** + * DOC: SPEC_SYNC_IOC_GET_VER - Get Spec driver version + * + * Returns Spec driver version. + */ +#define SPEC_SYNC_IOC_GET_VER _IOWR(SPEC_SYNC_MAGIC, 5, __u64) + +#endif /* _UAPI_LINUX_SPEC_SYNC_H */ diff --git a/sync_fence/src/qcom_sync_file.c b/sync_fence/src/qcom_sync_file.c new file mode 100644 index 0000000000..3cb2178412 --- /dev/null +++ b/sync_fence/src/qcom_sync_file.c @@ -0,0 +1,466 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CLASS_NAME "sync" +#define DRV_NAME "spec_sync" +#define DRV_VERSION 1 +#define NAME_LEN 32 + +#define SPEC_FENCE_FLAG_FENCE_ARRAY 0x10 /* user flags for debug */ +#define FENCE_MIN 1 +#define FENCE_MAX 32 + +struct sync_device { + /* device info */ + struct class *dev_class; + dev_t dev_num; + struct device *dev; + struct cdev *cdev; + struct mutex lock; + + /* device drv data */ + atomic_t device_available; + char name[NAME_LEN]; + uint32_t version; + struct mutex l_lock; + struct list_head fence_array_list; +}; + +struct fence_array_node { + struct dma_fence_array *fence_array; + struct list_head list; +}; + +/* Speculative Sync Device Driver State */ +static struct sync_device sync_dev; + +static bool sanitize_fence_array(struct dma_fence_array *fence) +{ + struct fence_array_node *node; + int ret = false; + + mutex_lock(&sync_dev.l_lock); + list_for_each_entry(node, &sync_dev.fence_array_list, list) { + if (node->fence_array == fence) { + ret = true; + break; + } + } + mutex_unlock(&sync_dev.l_lock); + + return ret; +} + +static void clear_fence_array_tracker(bool force_clear) +{ + struct fence_array_node *node, *temp; + struct dma_fence_array *array; + struct dma_fence *fence; + bool is_signaled; + + mutex_lock(&sync_dev.l_lock); + list_for_each_entry_safe(node, temp, &sync_dev.fence_array_list, list) { + array = node->fence_array; + fence = &array->base; + is_signaled = dma_fence_is_signaled(fence); + + if (force_clear && !array->fences) + array->num_fences = 0; + + pr_debug("force_clear:%d is_signaled:%d pending:%d\n", force_clear, is_signaled, + atomic_read(&array->num_pending)); + + if (force_clear && !is_signaled && atomic_dec_and_test(&array->num_pending)) + dma_fence_signal(fence); + + if (force_clear || is_signaled) { + dma_fence_put(fence); + 
list_del(&node->list); + kfree(node); + } + } + mutex_unlock(&sync_dev.l_lock); +} + +static struct sync_device *spec_fence_init_locked(struct sync_device *obj, const char *name) +{ + if (atomic_read(&obj->device_available)) + return NULL; + + atomic_inc(&obj->device_available); + + memset(obj->name, 0, NAME_LEN); + strlcpy(obj->name, name, sizeof(obj->name)); + + return obj; +} + +static int spec_sync_open(struct inode *inode, struct file *file) +{ + char task_comm[TASK_COMM_LEN]; + struct sync_device *obj = &sync_dev; + int ret = 0; + + if (!inode || !inode->i_cdev || !file) { + pr_err("NULL pointer passed\n"); + return -EINVAL; + } + + mutex_lock(&sync_dev.lock); + + get_task_comm(task_comm, current); + + obj = spec_fence_init_locked(obj, task_comm); + if (!obj) { + pr_err("Spec device exists owner:%s caller:%s\n", sync_dev.name, task_comm); + ret = -EEXIST; + goto end; + } + + file->private_data = obj; + +end: + mutex_unlock(&sync_dev.lock); + return ret; +} + +static int spec_sync_release(struct inode *inode, struct file *file) +{ + int ret = 0; + struct sync_device *obj = file->private_data; + + mutex_lock(&sync_dev.lock); + + if (!atomic_read(&obj->device_available)) { + pr_err("sync release failed !!\n"); + ret = -ENODEV; + goto end; + } + + clear_fence_array_tracker(true); + atomic_dec(&obj->device_available); + +end: + mutex_unlock(&sync_dev.lock); + return ret; +} + +static int spec_sync_ioctl_get_ver(struct sync_device *obj, unsigned long __user arg) +{ + uint32_t version = obj->version; + + if (copy_to_user((void __user *)arg, &version, sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int spec_sync_create_array(struct fence_create_data *f) +{ + int fd = get_unused_fd_flags(O_CLOEXEC); + struct sync_file *sync_file; + struct dma_fence_array *fence_array; + struct fence_array_node *node; + bool signal_any; + int ret = 0; + + if (fd < 0) { + pr_err("failed to get_unused_fd_flags\n"); + return fd; + } + + if (f->num_fences < FENCE_MIN || 
f->num_fences > FENCE_MAX) { + pr_err("invalid arguments num_fences:%d\n", f->num_fences); + ret = -ERANGE; + goto error_args; + } + + signal_any = f->flags & SPEC_FENCE_SIGNAL_ALL ? false : true; + + fence_array = dma_fence_array_create(f->num_fences, NULL, + dma_fence_context_alloc(1), 0, signal_any); + + /* Set the enable signal such that signalling is not done during wait*/ + set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence_array->base.flags); + set_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &fence_array->base.flags); + + sync_file = sync_file_create(&fence_array->base); + if (!sync_file) { + pr_err("sync_file_create fail\n"); + ret = -EINVAL; + goto err; + } + node = kzalloc((sizeof(struct fence_array_node)), GFP_KERNEL); + if (!node) { + fput(sync_file->file); + ret = -ENOMEM; + goto err; + } + + fd_install(fd, sync_file->file); + node->fence_array = fence_array; + + mutex_lock(&sync_dev.l_lock); + list_add_tail(&node->list, &sync_dev.fence_array_list); + mutex_unlock(&sync_dev.l_lock); + + pr_debug("spec fd:%d num_fences:%u\n", fd, f->num_fences); + return fd; + +err: + dma_fence_put(&fence_array->base); +error_args: + put_unused_fd(fd); + return ret; +} + +static int spec_sync_ioctl_create_fence(struct sync_device *obj, unsigned long __user arg) +{ + struct fence_create_data f; + int fd; + + if (copy_from_user(&f, (void __user *)arg, sizeof(f))) + return -EFAULT; + + fd = spec_sync_create_array(&f); + if (fd < 0) + return fd; + + f.out_bind_fd = fd; + + if (copy_to_user((void __user *)arg, &f, sizeof(f))) + return -EFAULT; + + return 0; +} + +static int spec_sync_bind_array(struct fence_bind_data *sync_bind_info) +{ + struct dma_fence_array *fence_array; + struct dma_fence *fence = NULL; + struct dma_fence *user_fence = NULL; + struct dma_fence **fence_list; + int *user_fds, ret = 0, i; + u32 num_fences, counter; + + fence = sync_file_get_fence(sync_bind_info->out_bind_fd); + if (!fence) { + pr_err("dma fence failure out_fd:%d\n", sync_bind_info->out_bind_fd); + 
return -EINVAL; + } + + fence_array = container_of(fence, struct dma_fence_array, base); + if (!sanitize_fence_array(fence_array)) { + pr_err("spec fence not found in the registered list out_fd:%d\n", + sync_bind_info->out_bind_fd); + ret = -EINVAL; + goto end; + } + num_fences = fence_array->num_fences; + counter = num_fences; + + user_fds = kzalloc(num_fences * (sizeof(int)), GFP_KERNEL); + if (!user_fds) { + ret = -ENOMEM; + goto end; + } + + fence_list = kmalloc_array(num_fences, sizeof(void *), GFP_KERNEL|__GFP_ZERO); + if (!fence_list) { + ret = -ENOMEM; + goto out; + } + + if (copy_from_user(user_fds, (void __user *)sync_bind_info->fds, + num_fences * sizeof(int))) { + kfree(fence_list); + ret = -EFAULT; + goto out; + } + + fence_array->fences = fence_list; + for (i = 0; i < num_fences; i++) { + user_fence = sync_file_get_fence(user_fds[i]); + if (!user_fence) { + pr_err("bind fences are invalid !! user_fd:%d out_bind_fd:%d\n", + user_fds[i], sync_bind_info->out_bind_fd); + counter = i; + ret = -EINVAL; + goto bind_invalid; + } + fence_array->fences[i] = user_fence; + pr_debug("spec fd:%d i:%d bind fd:%d error:%d\n", sync_bind_info->out_bind_fd, + i, user_fds[i], fence_array->fences[i]->error); + } + + clear_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags); + dma_fence_enable_sw_signaling(&fence_array->base); + + clear_fence_array_tracker(false); + +bind_invalid: + if (ret) { + for (i = counter - 1; i >= 0; i--) + dma_fence_put(fence_array->fences[i]); + + kfree(fence_list); + fence_array->fences = NULL; + fence_array->num_fences = 0; + dma_fence_set_error(fence, -EINVAL); + dma_fence_signal(fence); + clear_fence_array_tracker(false); + } +out: + kfree(user_fds); +end: + dma_fence_put(fence); + return ret; +} + +static int spec_sync_ioctl_bind(struct sync_device *obj, unsigned long __user arg) +{ + struct fence_bind_data sync_bind_info; + + if (copy_from_user(&sync_bind_info, (void __user *)arg, sizeof(struct fence_bind_data))) + return -EFAULT; + + if 
(sync_bind_info.out_bind_fd < 0) { + pr_err("Invalid out_fd:%d\n", sync_bind_info.out_bind_fd); + return -EINVAL; + } + + return spec_sync_bind_array(&sync_bind_info); +} + +static long spec_sync_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct sync_device *obj = file->private_data; + int ret = 0; + + switch (cmd) { + case SPEC_SYNC_IOC_CREATE_FENCE: + ret = spec_sync_ioctl_create_fence(obj, arg); + break; + case SPEC_SYNC_IOC_BIND: + ret = spec_sync_ioctl_bind(obj, arg); + break; + case SPEC_SYNC_IOC_GET_VER: + ret = spec_sync_ioctl_get_ver(obj, arg); + break; + default: + ret = -ENOTTY; + } + + return ret; +} + +const struct file_operations spec_sync_fops = { + .owner = THIS_MODULE, + .open = spec_sync_open, + .release = spec_sync_release, + .unlocked_ioctl = spec_sync_ioctl, +}; + +static int spec_sync_register_device(void) +{ + int ret; + + sync_dev.dev_class = class_create(THIS_MODULE, CLASS_NAME); + if (sync_dev.dev_class == NULL) { + pr_err("%s: class_create fail.\n", __func__); + goto res_err; + } + + ret = alloc_chrdev_region(&sync_dev.dev_num, 0, 1, DRV_NAME); + if (ret) { + pr_err("%s: alloc_chrdev_region fail.\n", __func__); + goto alloc_chrdev_region_err; + } + + sync_dev.dev = device_create(sync_dev.dev_class, NULL, + sync_dev.dev_num, + &sync_dev, DRV_NAME); + if (IS_ERR(sync_dev.dev)) { + pr_err("%s: device_create fail.\n", __func__); + goto device_create_err; + } + + sync_dev.cdev = cdev_alloc(); + if (sync_dev.cdev == NULL) { + pr_err("%s: cdev_alloc fail.\n", __func__); + goto cdev_alloc_err; + } + cdev_init(sync_dev.cdev, &spec_sync_fops); + sync_dev.cdev->owner = THIS_MODULE; + + ret = cdev_add(sync_dev.cdev, sync_dev.dev_num, 1); + if (ret) { + pr_err("%s: cdev_add fail.\n", __func__); + goto cdev_add_err; + } + + sync_dev.version = DRV_VERSION; + mutex_init(&sync_dev.lock); + mutex_init(&sync_dev.l_lock); + INIT_LIST_HEAD(&sync_dev.fence_array_list); + + return 0; + +cdev_add_err: + cdev_del(sync_dev.cdev); 
+cdev_alloc_err: + device_destroy(sync_dev.dev_class, sync_dev.dev_num); +device_create_err: + unregister_chrdev_region(sync_dev.dev_num, 1); +alloc_chrdev_region_err: + class_destroy(sync_dev.dev_class); +res_err: + return -ENODEV; +} + +static int __init spec_sync_init(void) +{ + int ret = 0; + + ret = spec_sync_register_device(); + if (ret) { + pr_err("%s: speculative sync driver register fail.\n", __func__); + return ret; + } + return ret; +} + +static void __exit spec_sync_deinit(void) +{ + cdev_del(sync_dev.cdev); + device_destroy(sync_dev.dev_class, sync_dev.dev_num); + unregister_chrdev_region(sync_dev.dev_num, 1); + class_destroy(sync_dev.dev_class); +} + +module_init(spec_sync_init); +module_exit(spec_sync_deinit); + +MODULE_DESCRIPTION("QCOM Speculative Sync Driver"); +MODULE_LICENSE("GPL v2"); From 03c0ab32cb33fc66a27aa09277d592b51bcfbd29 Mon Sep 17 00:00:00 2001 From: Shashank Babu Chinta Venkata Date: Fri, 19 Nov 2021 10:29:59 -0800 Subject: [PATCH 003/166] mm-drivers: msm_ext_display: add snapshot Add snapshot of msm_ext_display driver in mm-drivers repo. Change-Id: Iaf70f09d3a95f564e08105d33cdc26cbb4981048 Signed-off-by: Shashank Babu Chinta Venkata Signed-off-by: Jeykumar Sankaran --- msm_ext_display/src/msm_ext_display.c | 702 ++++++++++++++++++++++++++ 1 file changed, 702 insertions(+) create mode 100644 msm_ext_display/src/msm_ext_display.c diff --git a/msm_ext_display/src/msm_ext_display.c b/msm_ext_display/src/msm_ext_display.c new file mode 100644 index 0000000000..57da7fe2ee --- /dev/null +++ b/msm_ext_display/src/msm_ext_display.c @@ -0,0 +1,702 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. 
+ */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct msm_ext_disp_list { + struct msm_ext_disp_init_data *data; + struct list_head list; +}; + +struct msm_ext_disp { + struct msm_ext_disp_data ext_disp_data; + struct platform_device *pdev; + struct msm_ext_disp_codec_id current_codec; + struct msm_ext_disp_audio_codec_ops *ops; + struct extcon_dev *audio_sdev[MSM_EXT_DISP_MAX_CODECS]; + bool audio_session_on; + struct list_head display_list; + struct mutex lock; + bool update_audio; +}; + +static const unsigned int msm_ext_disp_supported_cable[] = { + EXTCON_DISP_DP, + EXTCON_DISP_HDMI, + EXTCON_NONE, +}; + +static int msm_ext_disp_extcon_register(struct msm_ext_disp *ext_disp, int id) +{ + int ret = 0; + + if (!ext_disp || !ext_disp->pdev || id >= MSM_EXT_DISP_MAX_CODECS) { + pr_err("invalid params\n"); + return -EINVAL; + } + + ext_disp->audio_sdev[id] = devm_extcon_dev_allocate( + &ext_disp->pdev->dev, + msm_ext_disp_supported_cable); + if (IS_ERR(ext_disp->audio_sdev[id])) + return PTR_ERR(ext_disp->audio_sdev[id]); + + ret = devm_extcon_dev_register(&ext_disp->pdev->dev, + ext_disp->audio_sdev[id]); + if (ret) { + pr_err("audio registration failed\n"); + return ret; + } + + pr_debug("extcon registration done\n"); + + return ret; +} + +static void msm_ext_disp_extcon_unregister(struct msm_ext_disp *ext_disp, + int id) +{ + if (!ext_disp || !ext_disp->pdev || id >= MSM_EXT_DISP_MAX_CODECS) { + pr_err("Invalid params\n"); + return; + } + + devm_extcon_dev_unregister(&ext_disp->pdev->dev, + ext_disp->audio_sdev[id]); +} + +static const char *msm_ext_disp_name(enum msm_ext_disp_type type) +{ + switch (type) { + case EXT_DISPLAY_TYPE_HDMI: + return "EXT_DISPLAY_TYPE_HDMI"; + case EXT_DISPLAY_TYPE_DP: + return "EXT_DISPLAY_TYPE_DP"; + default: return "???"; + } +} + +static int msm_ext_disp_add_intf_data(struct msm_ext_disp *ext_disp, + struct 
msm_ext_disp_init_data *data) +{ + struct msm_ext_disp_list *node; + + if (!ext_disp || !data) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + + node->data = data; + + list_add(&node->list, &ext_disp->display_list); + + pr_debug("Added new display (%s) ctld (%d) stream (%d)\n", + msm_ext_disp_name(data->codec.type), + data->codec.ctrl_id, data->codec.stream_id); + + return 0; +} + +static int msm_ext_disp_remove_intf_data(struct msm_ext_disp *ext_disp, + struct msm_ext_disp_init_data *data) +{ + struct msm_ext_disp_list *node; + struct list_head *pos = NULL; + + if (!ext_disp || !data) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + list_for_each(pos, &ext_disp->display_list) { + node = list_entry(pos, struct msm_ext_disp_list, list); + if (node->data == data) { + list_del(pos); + pr_debug("Deleted the intf data\n"); + kfree(node); + return 0; + } + } + + pr_debug("Intf data not present for delete op\n"); + + return 0; +} + +static int msm_ext_disp_get_intf_data(struct msm_ext_disp *ext_disp, + struct msm_ext_disp_codec_id *codec, + struct msm_ext_disp_init_data **data) +{ + int ret = 0; + struct msm_ext_disp_list *node; + struct list_head *position = NULL; + + if (!ext_disp || !data || !codec) { + pr_err("Invalid params\n"); + ret = -EINVAL; + goto end; + } + + *data = NULL; + list_for_each(position, &ext_disp->display_list) { + node = list_entry(position, struct msm_ext_disp_list, list); + if (node->data->codec.type == codec->type && + node->data->codec.stream_id == codec->stream_id && + node->data->codec.ctrl_id == codec->ctrl_id) { + *data = node->data; + break; + } + } + + if (!*data) + ret = -ENODEV; +end: + return ret; +} + +static int msm_ext_disp_process_audio(struct msm_ext_disp *ext_disp, + struct msm_ext_disp_codec_id *codec, + enum msm_ext_disp_cable_state new_state) +{ + int ret = 0; + int state; + struct extcon_dev *audio_sdev; + + if (!ext_disp->ops) { + 
pr_err("codec not registered, skip notification\n"); + ret = -EPERM; + goto end; + } + + audio_sdev = ext_disp->audio_sdev[codec->stream_id]; + + state = extcon_get_state(audio_sdev, codec->type); + if (state == !!new_state) { + ret = -EEXIST; + pr_debug("same state\n"); + goto end; + } + + ret = extcon_set_state_sync(audio_sdev, + codec->type, !!new_state); + if (ret) + pr_err("Failed to set state. Error = %d\n", ret); + else + pr_debug("state changed to %d\n", new_state); + +end: + return ret; +} + +static struct msm_ext_disp *msm_ext_disp_validate_and_get( + struct platform_device *pdev, + struct msm_ext_disp_codec_id *codec, + enum msm_ext_disp_cable_state state) +{ + struct msm_ext_disp_data *ext_disp_data; + struct msm_ext_disp *ext_disp; + + if (!pdev) { + pr_err("invalid platform device\n"); + goto err; + } + + if (!codec || + codec->type >= EXT_DISPLAY_TYPE_MAX || + codec->ctrl_id != 0 || + codec->stream_id >= MSM_EXT_DISP_MAX_CODECS) { + pr_err("invalid display codec id\n"); + goto err; + } + + if (state < EXT_DISPLAY_CABLE_DISCONNECT || + state >= EXT_DISPLAY_CABLE_STATE_MAX) { + pr_err("invalid HPD state (%d)\n", state); + goto err; + } + + ext_disp_data = platform_get_drvdata(pdev); + if (!ext_disp_data) { + pr_err("invalid drvdata\n"); + goto err; + } + + ext_disp = container_of(ext_disp_data, + struct msm_ext_disp, ext_disp_data); + + return ext_disp; +err: + return ERR_PTR(-EINVAL); +} + +static int msm_ext_disp_update_audio_ops(struct msm_ext_disp *ext_disp, + struct msm_ext_disp_codec_id *codec) +{ + int ret = 0; + struct msm_ext_disp_init_data *data = NULL; + + ret = msm_ext_disp_get_intf_data(ext_disp, codec, &data); + if (ret || !data) { + pr_err("Display not found (%s) ctld (%d) stream (%d)\n", + msm_ext_disp_name(codec->type), + codec->ctrl_id, codec->stream_id); + goto end; + } + + if (ext_disp->ops) { + *ext_disp->ops = data->codec_ops; + ext_disp->current_codec = *codec; + + /* update pdev for interface to use */ + 
ext_disp->ext_disp_data.intf_pdev = data->pdev; + ext_disp->ext_disp_data.intf_data = data->intf_data; + } + +end: + return ret; +} + +static int msm_ext_disp_audio_config(struct platform_device *pdev, + struct msm_ext_disp_codec_id *codec, + enum msm_ext_disp_cable_state state) +{ + int ret = 0; + struct msm_ext_disp *ext_disp; + + ext_disp = msm_ext_disp_validate_and_get(pdev, codec, state); + if (IS_ERR(ext_disp)) { + ret = PTR_ERR(ext_disp); + goto end; + } + + if (state == EXT_DISPLAY_CABLE_CONNECT) { + ret = msm_ext_disp_select_audio_codec(pdev, codec); + } else { + mutex_lock(&ext_disp->lock); + if (ext_disp->ops) + memset(ext_disp->ops, 0, sizeof(*ext_disp->ops)); + + pr_debug("codec ops cleared for %s\n", + msm_ext_disp_name(ext_disp->current_codec.type)); + + ext_disp->current_codec.type = EXT_DISPLAY_TYPE_MAX; + mutex_unlock(&ext_disp->lock); + } +end: + return ret; +} + +static int msm_ext_disp_audio_notify(struct platform_device *pdev, + struct msm_ext_disp_codec_id *codec, + enum msm_ext_disp_cable_state state) +{ + int ret = 0; + struct msm_ext_disp *ext_disp; + + ext_disp = msm_ext_disp_validate_and_get(pdev, codec, state); + if (IS_ERR(ext_disp)) { + ret = PTR_ERR(ext_disp); + goto end; + } + + mutex_lock(&ext_disp->lock); + ret = msm_ext_disp_process_audio(ext_disp, codec, state); + mutex_unlock(&ext_disp->lock); +end: + return ret; +} + +static void msm_ext_disp_ready_for_display(struct msm_ext_disp *ext_disp) +{ + int ret; + struct msm_ext_disp_init_data *data = NULL; + + if (!ext_disp) { + pr_err("invalid input\n"); + return; + } + + ret = msm_ext_disp_get_intf_data(ext_disp, + &ext_disp->current_codec, &data); + if (ret) { + pr_err("%s not found\n", + msm_ext_disp_name(ext_disp->current_codec.type)); + return; + } + + *ext_disp->ops = data->codec_ops; + data->codec_ops.ready(ext_disp->pdev); +} + +int msm_hdmi_register_audio_codec(struct platform_device *pdev, + struct msm_ext_disp_audio_codec_ops *ops) +{ + return 
msm_ext_disp_register_audio_codec(pdev, ops); +} + +/** + * Register audio codec ops to display driver + * for HDMI/Display Port usecase support. + * + * @return 0 on success, negative value on error + * + */ +int msm_ext_disp_register_audio_codec(struct platform_device *pdev, + struct msm_ext_disp_audio_codec_ops *ops) +{ + int ret = 0; + struct msm_ext_disp *ext_disp = NULL; + struct msm_ext_disp_data *ext_disp_data = NULL; + + if (!pdev || !ops) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + ext_disp_data = platform_get_drvdata(pdev); + if (!ext_disp_data) { + pr_err("Invalid drvdata\n"); + return -EINVAL; + } + + ext_disp = container_of(ext_disp_data, struct msm_ext_disp, + ext_disp_data); + + mutex_lock(&ext_disp->lock); + + if (ext_disp->ops) { + pr_err("Codec already registered\n"); + ret = -EINVAL; + goto end; + } + + ext_disp->ops = ops; + + pr_debug("audio codec registered\n"); + + if (ext_disp->update_audio) { + ext_disp->update_audio = false; + msm_ext_disp_update_audio_ops(ext_disp, &ext_disp->current_codec); + msm_ext_disp_process_audio(ext_disp, &ext_disp->current_codec, + EXT_DISPLAY_CABLE_CONNECT); + } + +end: + mutex_unlock(&ext_disp->lock); + if (ext_disp->current_codec.type != EXT_DISPLAY_TYPE_MAX) + msm_ext_disp_ready_for_display(ext_disp); + + return ret; +} +EXPORT_SYMBOL(msm_ext_disp_register_audio_codec); + +int msm_ext_disp_select_audio_codec(struct platform_device *pdev, + struct msm_ext_disp_codec_id *codec) +{ + int ret = 0; + struct msm_ext_disp *ext_disp = NULL; + struct msm_ext_disp_data *ext_disp_data = NULL; + + if (!pdev || !codec) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + ext_disp_data = platform_get_drvdata(pdev); + if (!ext_disp_data) { + pr_err("Invalid drvdata\n"); + return -EINVAL; + } + + ext_disp = container_of(ext_disp_data, struct msm_ext_disp, + ext_disp_data); + + mutex_lock(&ext_disp->lock); + + if (!ext_disp->ops) { + pr_warn("Codec is not registered\n"); + ext_disp->update_audio = true; + 
ext_disp->current_codec = *codec; + ret = -EINVAL; + goto end; + } + + ret = msm_ext_disp_update_audio_ops(ext_disp, codec); + +end: + mutex_unlock(&ext_disp->lock); + + return ret; +} +EXPORT_SYMBOL(msm_ext_disp_select_audio_codec); + +static int msm_ext_disp_validate_intf(struct msm_ext_disp_init_data *init_data) +{ + struct msm_ext_disp_audio_codec_ops *ops; + + if (!init_data) { + pr_err("Invalid init_data\n"); + return -EINVAL; + } + + if (!init_data->pdev) { + pr_err("Invalid display intf pdev\n"); + return -EINVAL; + } + + if (init_data->codec.type >= EXT_DISPLAY_TYPE_MAX || + init_data->codec.ctrl_id != 0 || + init_data->codec.stream_id >= MSM_EXT_DISP_MAX_CODECS) { + pr_err("Invalid codec info type(%d), ctrl(%d) stream(%d)\n", + init_data->codec.type, + init_data->codec.ctrl_id, + init_data->codec.stream_id); + return -EINVAL; + } + + ops = &init_data->codec_ops; + + if (!ops->audio_info_setup || !ops->get_audio_edid_blk || + !ops->cable_status || !ops->get_intf_id || + !ops->teardown_done || !ops->acknowledge || + !ops->ready) { + pr_err("Invalid codec operation pointers\n"); + return -EINVAL; + } + + return 0; +} + +int msm_ext_disp_register_intf(struct platform_device *pdev, + struct msm_ext_disp_init_data *init_data) +{ + int ret = 0; + struct msm_ext_disp_init_data *data = NULL; + struct msm_ext_disp *ext_disp = NULL; + struct msm_ext_disp_data *ext_disp_data = NULL; + + if (!pdev || !init_data) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + ext_disp_data = platform_get_drvdata(pdev); + if (!ext_disp_data) { + pr_err("Invalid drvdata\n"); + return -EINVAL; + } + + ext_disp = container_of(ext_disp_data, struct msm_ext_disp, + ext_disp_data); + + mutex_lock(&ext_disp->lock); + + ret = msm_ext_disp_validate_intf(init_data); + if (ret) + goto end; + + ret = msm_ext_disp_get_intf_data(ext_disp, &init_data->codec, &data); + if (!ret) { + pr_err("%s already registered. 
ctrl(%d) stream(%d)\n", + msm_ext_disp_name(init_data->codec.type), + init_data->codec.ctrl_id, + init_data->codec.stream_id); + goto end; + } + + ret = msm_ext_disp_add_intf_data(ext_disp, init_data); + if (ret) + goto end; + + init_data->intf_ops.audio_config = msm_ext_disp_audio_config; + init_data->intf_ops.audio_notify = msm_ext_disp_audio_notify; + + pr_debug("%s registered. ctrl(%d) stream(%d)\n", + msm_ext_disp_name(init_data->codec.type), + init_data->codec.ctrl_id, + init_data->codec.stream_id); +end: + mutex_unlock(&ext_disp->lock); + return ret; +} +EXPORT_SYMBOL(msm_ext_disp_register_intf); + +int msm_ext_disp_deregister_intf(struct platform_device *pdev, + struct msm_ext_disp_init_data *init_data) +{ + int ret = 0; + struct msm_ext_disp *ext_disp = NULL; + struct msm_ext_disp_data *ext_disp_data = NULL; + + if (!pdev || !init_data) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + ext_disp_data = platform_get_drvdata(pdev); + if (!ext_disp_data) { + pr_err("Invalid drvdata\n"); + return -EINVAL; + } + + ext_disp = container_of(ext_disp_data, struct msm_ext_disp, + ext_disp_data); + + mutex_lock(&ext_disp->lock); + + ret = msm_ext_disp_remove_intf_data(ext_disp, init_data); + if (ret) + goto end; + + init_data->intf_ops.audio_config = NULL; + init_data->intf_ops.audio_notify = NULL; + + pr_debug("%s deregistered\n", + msm_ext_disp_name(init_data->codec.type)); +end: + mutex_unlock(&ext_disp->lock); + + return ret; +} +EXPORT_SYMBOL(msm_ext_disp_deregister_intf); + +static int msm_ext_disp_probe(struct platform_device *pdev) +{ + int ret = 0, id; + struct device_node *of_node = NULL; + struct msm_ext_disp *ext_disp = NULL; + + if (!pdev) { + pr_err("No platform device found\n"); + ret = -ENODEV; + goto end; + } + + of_node = pdev->dev.of_node; + if (!of_node) { + pr_err("No device node found\n"); + ret = -ENODEV; + goto end; + } + + ext_disp = devm_kzalloc(&pdev->dev, sizeof(*ext_disp), GFP_KERNEL); + if (!ext_disp) { + ret = -ENOMEM; + goto 
end; + } + + platform_set_drvdata(pdev, &ext_disp->ext_disp_data); + ext_disp->pdev = pdev; + + for (id = 0; id < MSM_EXT_DISP_MAX_CODECS; id++) { + ret = msm_ext_disp_extcon_register(ext_disp, id); + if (ret) + goto child_node_failure; + } + + ret = of_platform_populate(of_node, NULL, NULL, &pdev->dev); + if (ret) { + pr_err("Failed to add child devices. Error = %d\n", ret); + goto child_node_failure; + } else { + pr_debug("%s: Added child devices.\n", __func__); + } + + mutex_init(&ext_disp->lock); + + INIT_LIST_HEAD(&ext_disp->display_list); + ext_disp->current_codec.type = EXT_DISPLAY_TYPE_MAX; + ext_disp->update_audio = false; + + return ret; + +child_node_failure: + for (id = 0; id < MSM_EXT_DISP_MAX_CODECS; id++) + msm_ext_disp_extcon_unregister(ext_disp, id); + + devm_kfree(&ext_disp->pdev->dev, ext_disp); +end: + return ret; +} + +static int msm_ext_disp_remove(struct platform_device *pdev) +{ + int ret = 0, id; + struct msm_ext_disp *ext_disp = NULL; + struct msm_ext_disp_data *ext_disp_data = NULL; + + if (!pdev) { + pr_err("No platform device\n"); + ret = -ENODEV; + goto end; + } + + ext_disp_data = platform_get_drvdata(pdev); + if (!ext_disp_data) { + pr_err("No drvdata found\n"); + ret = -ENODEV; + goto end; + } + + ext_disp = container_of(ext_disp_data, struct msm_ext_disp, + ext_disp_data); + + for (id = 0; id < MSM_EXT_DISP_MAX_CODECS; id++) + msm_ext_disp_extcon_unregister(ext_disp, id); + + mutex_destroy(&ext_disp->lock); + devm_kfree(&ext_disp->pdev->dev, ext_disp); + +end: + return ret; +} + +static const struct of_device_id msm_ext_dt_match[] = { + {.compatible = "qcom,msm-ext-disp",}, + { /* Sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, msm_ext_dt_match); + +static struct platform_driver this_driver = { + .probe = msm_ext_disp_probe, + .remove = msm_ext_disp_remove, + .driver = { + .name = "msm-ext-disp", + .of_match_table = msm_ext_dt_match, + }, +}; + +static int __init msm_ext_disp_init(void) +{ + int ret = 0; + + ret = 
platform_driver_register(&this_driver); + if (ret) + pr_err("failed, ret = %d\n", ret); + + return ret; +} + +subsys_initcall(msm_ext_disp_init); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MSM External Display"); From 559a6bee2c2b577f3790475d8f2a4f43c94a2627 Mon Sep 17 00:00:00 2001 From: Jeykumar Sankaran Date: Wed, 15 Dec 2021 16:29:11 -0800 Subject: [PATCH 004/166] mm-drivers: enable mm-driver modules compilation Enable compilation of mm-driver modules and add scripts to copy the uapi header files. Change-Id: I0af6581ca96aa630c9707ef05abc4cccbfe92bab Signed-off-by: Jeykumar Sankaran --- Android.bp | 36 ++++++++++++++ Android.mk | 1 + config/kalamammdrivers.conf | 5 ++ config/kalamammdriversconf.h | 7 +++ mm_driver_board.mk | 12 +++++ mm_driver_product.mk | 3 ++ mm_drivers_kernel_headers.py | 95 ++++++++++++++++++++++++++++++++++++ msm_ext_display/Android.mk | 32 ++++++++++++ msm_ext_display/Kbuild | 10 ++++ msm_ext_display/Makefile | 15 ++++++ sync_fence/Android.mk | 32 ++++++++++++ sync_fence/Kbuild | 10 ++++ sync_fence/Makefile | 15 ++++++ 13 files changed, 273 insertions(+) create mode 100644 Android.bp create mode 100644 Android.mk create mode 100644 config/kalamammdrivers.conf create mode 100644 config/kalamammdriversconf.h create mode 100644 mm_driver_board.mk create mode 100644 mm_driver_product.mk create mode 100644 mm_drivers_kernel_headers.py create mode 100644 msm_ext_display/Android.mk create mode 100644 msm_ext_display/Kbuild create mode 100644 msm_ext_display/Makefile create mode 100644 sync_fence/Android.mk create mode 100644 sync_fence/Kbuild create mode 100644 sync_fence/Makefile diff --git a/Android.bp b/Android.bp new file mode 100644 index 0000000000..753cce932b --- /dev/null +++ b/Android.bp @@ -0,0 +1,36 @@ +headers_src = [ + "sync_fence/include/uapi/*/**/*.h", +] + +mm_drivers_headers_out = [ + "sync_fence/qcom_sync_file.h", +] + +mm_drivers_kernel_headers_verbose = "--verbose " +genrule { + name: 
"qti_generate_mm_drivers_kernel_headers", + tools: [ + "headers_install.sh", + "unifdef" + ], + tool_files: [ + "mm_drivers_kernel_headers.py", + ], + srcs: headers_src, + cmd: "python3 $(location mm_drivers_kernel_headers.py) " + + mm_drivers_kernel_headers_verbose + + "--header_arch arm64 " + + "--gen_dir $(genDir) " + + "--mm_drivers_include_uapi $(locations sync_fence/include/uapi/*/**/*.h) " + + "--unifdef $(location unifdef) " + + "--headers_install $(location headers_install.sh)", + out: mm_drivers_headers_out, +} + +cc_library_headers { + name: "qti_mm_drivers_kernel_headers", + generated_headers: ["qti_generate_mm_drivers_kernel_headers"], + export_generated_headers: ["qti_generate_mm_drivers_kernel_headers"], + vendor: true, + recovery_available: true +} diff --git a/Android.mk b/Android.mk new file mode 100644 index 0000000000..5053e7d643 --- /dev/null +++ b/Android.mk @@ -0,0 +1 @@ +include $(call all-subdir-makefiles) diff --git a/config/kalamammdrivers.conf b/config/kalamammdrivers.conf new file mode 100644 index 0000000000..3df22e75d5 --- /dev/null +++ b/config/kalamammdrivers.conf @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. +# Copyright (c) 2020, The Linux Foundation. All rights reserved. + +export CONFIG_MSM_EXT_DISPLAY=y diff --git a/config/kalamammdriversconf.h b/config/kalamammdriversconf.h new file mode 100644 index 0000000000..26ca25d02f --- /dev/null +++ b/config/kalamammdriversconf.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
+ */ + +#define CONFIG_MSM_EXT_DISPLAY 1 diff --git a/mm_driver_board.mk b/mm_driver_board.mk new file mode 100644 index 0000000000..4ee3326519 --- /dev/null +++ b/mm_driver_board.mk @@ -0,0 +1,12 @@ +#SPDX-License-Identifier: GPL-2.0-only + +ifneq ($(TARGET_BOARD_AUTO),true) + ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true) + BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko \ + $(KERNEL_MODULES_OUT)/sync_fence.ko + BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko \ + $(KERNEL_MODULES_OUT)/sync_fence.ko + BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/msm_ext_display.ko \ + $(KERNEL_MODULES_OUT)/sync_fence.ko + endif +endif diff --git a/mm_driver_product.mk b/mm_driver_product.mk new file mode 100644 index 0000000000..2f0db285b4 --- /dev/null +++ b/mm_driver_product.mk @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only + +PRODUCT_PACKAGES += msm_ext_display.ko sync_fence.ko diff --git a/mm_drivers_kernel_headers.py b/mm_drivers_kernel_headers.py new file mode 100644 index 0000000000..67885a9446 --- /dev/null +++ b/mm_drivers_kernel_headers.py @@ -0,0 +1,95 @@ + # Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + # Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + # + # This program is free software; you can redistribute it and/or modify it + # under the terms of the GNU General Public License version 2 as published by + # the Free Software Foundation. + # + # This program is distributed in the hope that it will be useful, but WITHOUT + # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + # more details. + # + # You should have received a copy of the GNU General Public License along with + # this program. If not, see . 
+ +import argparse +import filecmp +import os +import re +import subprocess +import sys + +def run_headers_install(verbose, gen_dir, headers_install, unifdef, prefix, h): + if not h.startswith(prefix): + print('error: expected prefix [%s] on header [%s]' % (prefix, h)) + return False + + out_h = os.path.join(gen_dir, h[len(prefix):]) + (out_h_dirname, out_h_basename) = os.path.split(out_h) + env = os.environ.copy() + env["LOC_UNIFDEF"] = unifdef + cmd = ["sh", headers_install, h, out_h] + + if True: + print('run_headers_install: cmd is %s' % cmd) + + result = subprocess.call(cmd, env=env) + + if result != 0: + print('error: run_headers_install: cmd %s failed %d' % (cmd, result)) + return False + return True + +def gen_mm_drivers_headers(verbose, gen_dir, headers_install, unifdef, mm_drivers_include_uapi): + error_count = 0 + for h in mm_drivers_include_uapi: + mm_drivers_uapi_include_prefix = os.path.join(h.split('sync_fence/include/uapi')[0], + 'sync_fence', 'include', 'uapi') + os.sep + if not run_headers_install( + verbose, gen_dir, headers_install, unifdef, + mm_drivers_uapi_include_prefix, h): error_count += 1 + return error_count + +def main(): + """Parse command line arguments and perform top level control.""" + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + + # Arguments that apply to every invocation of this script. 
+ parser.add_argument( + '--verbose', action='store_true', + help='Print output that describes the workings of this script.') + parser.add_argument( + '--header_arch', required=True, + help='The arch for which to generate headers.') + parser.add_argument( + '--gen_dir', required=True, + help='Where to place the generated files.') + parser.add_argument( + '--mm_drivers_include_uapi', required=True, nargs='*', + help='The list of techpack/*/include/uapi header files.') + parser.add_argument( + '--headers_install', required=True, + help='The headers_install tool to process input headers.') + parser.add_argument( + '--unifdef', + required=True, + help='The unifdef tool used by headers_install.') + + args = parser.parse_args() + + if args.verbose: + print('header_arch [%s]' % args.header_arch) + print('gen_dir [%s]' % args.gen_dir) + print('mm_drivers_include_uapi [%s]' % args.mm_drivers_include_uapi) + print('headers_install [%s]' % args.headers_install) + print('unifdef [%s]' % args.unifdef) + + return gen_mm_drivers_headers(args.verbose, args.gen_dir, + args.headers_install, args.unifdef, args.mm_drivers_include_uapi) + +if __name__ == '__main__': + sys.exit(main()) + diff --git a/msm_ext_display/Android.mk b/msm_ext_display/Android.mk new file mode 100644 index 0000000000..feff0e4139 --- /dev/null +++ b/msm_ext_display/Android.mk @@ -0,0 +1,32 @@ +LOCAL_PATH := $(call my-dir) +include $(CLEAR_VARS) + +# This makefile is only for DLKM +ifneq ($(findstring vendor,$(LOCAL_PATH)),) + +ifneq ($(findstring opensource,$(LOCAL_PATH)),) + MSM_EXT_DISPLAY_BLD_DIR := $(TOP)/vendor/qcom/opensource/mm-drivers/msm_ext_display +endif # opensource + +DLKM_DIR := $(TOP)/device/qcom/common/dlkm + +LOCAL_ADDITIONAL_DEPENDENCIES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) + +########################################################### +# This is set once per LOCAL_PATH, not per (kernel) module +KBUILD_OPTIONS := MSM_EXT_DISPLAY_ROOT=$(MSM_EXT_DISPLAY_BLD_DIR) 
+KBUILD_OPTIONS += MODNAME=msm_ext_display +KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM) + +########################################################### +include $(CLEAR_VARS) +LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) +LOCAL_MODULE := msm_ext_display.ko +LOCAL_MODULE_KBUILD_NAME := msm_ext_display.ko +LOCAL_MODULE_TAGS := optional +LOCAL_MODULE_DEBUG_ENABLE := true +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) + +include $(DLKM_DIR)/Build_external_kernelmodule.mk +########################################################### +endif # DLKM check diff --git a/msm_ext_display/Kbuild b/msm_ext_display/Kbuild new file mode 100644 index 0000000000..284134c0af --- /dev/null +++ b/msm_ext_display/Kbuild @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-only + +include $(MSM_EXT_DISPLAY_ROOT)/config/kalamammdrivers.conf +LINUXINCLUDE += -include $(MSM_EXT_DISPLAY_ROOT)/config/kalamammdriversconf.h + +obj-m += msm_ext_display.o + +msm_ext_display-y := src/msm_ext_display.o + +CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" diff --git a/msm_ext_display/Makefile b/msm_ext_display/Makefile new file mode 100644 index 0000000000..31a8ce65bd --- /dev/null +++ b/msm_ext_display/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 + +KBUILD_OPTIONS += MSM_EXT_DISPLAY_ROOT=$(KERNEL_SRC)/$(M)/../ + +all: modules + +modules_install: + $(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install + +%: + $(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS) + +clean: + rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers + rm -rf .tmp_versions diff --git a/sync_fence/Android.mk b/sync_fence/Android.mk new file mode 100644 index 0000000000..59ee256f05 --- /dev/null +++ b/sync_fence/Android.mk @@ -0,0 +1,32 @@ +LOCAL_PATH := $(call my-dir) +include $(CLEAR_VARS) + +# This makefile is only for DLKM +ifneq ($(findstring vendor,$(LOCAL_PATH)),) + +ifneq ($(findstring opensource,$(LOCAL_PATH)),) + 
SYNC_FENCE_BLD_DIR := $(TOP)/vendor/qcom/opensource/mm-drivers/sync_fence +endif # opensource + +DLKM_DIR := $(TOP)/device/qcom/common/dlkm + +LOCAL_ADDITIONAL_DEPENDENCIES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) + +########################################################### +# This is set once per LOCAL_PATH, not per (kernel) module +KBUILD_OPTIONS := SYNC_FENCE_ROOT=$(SYNC_FENCE_BLD_DIR) +KBUILD_OPTIONS += MODNAME=sync_fence +KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM) + +########################################################### +include $(CLEAR_VARS) +LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) +LOCAL_MODULE := sync_fence.ko +LOCAL_MODULE_KBUILD_NAME := sync_fence.ko +LOCAL_MODULE_TAGS := optional +LOCAL_MODULE_DEBUG_ENABLE := true +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) + +include $(DLKM_DIR)/Build_external_kernelmodule.mk +########################################################### +endif # DLKM check diff --git a/sync_fence/Kbuild b/sync_fence/Kbuild new file mode 100644 index 0000000000..48cb10624b --- /dev/null +++ b/sync_fence/Kbuild @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-only + +KDIR := $(TOP)/kernel_platform/msm-kernel +LINUXINCLUDE += -I$(SYNC_FENCE_ROOT)sync_fence/include/ + +obj-m += sync_fence.o + +sync_fence-y := src/qcom_sync_file.o + +CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" diff --git a/sync_fence/Makefile b/sync_fence/Makefile new file mode 100644 index 0000000000..ecd6ef1771 --- /dev/null +++ b/sync_fence/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 + +KBUILD_OPTIONS += SYNC_FENCE_ROOT=$(KERNEL_SRC)/$(M)/../ + +all: modules + +modules_install: + $(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install + +%: + $(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS) + +clean: + rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers + rm -rf .tmp_versions From 40f846646bdf59667eb577a84fde8cd8e3cb9181 
Mon Sep 17 00:00:00 2001 From: Sandeep Gangadharaiah Date: Tue, 8 Feb 2022 13:48:19 -0500 Subject: [PATCH 005/166] mm-drivers: msm_ext_display: export msm-ext-display module symbols msm_ext_display module symbols have to be exported before they can be used by external kernel modules. This change updates the makefile for the same. Change-Id: I86dbc2d8bbc0a3a0d640172ef0aebc03723eecc8 Signed-off-by: Sandeep Gangadharaiah --- msm_ext_display/Android.mk | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/msm_ext_display/Android.mk b/msm_ext_display/Android.mk index feff0e4139..78d659c784 100644 --- a/msm_ext_display/Android.mk +++ b/msm_ext_display/Android.mk @@ -18,6 +18,16 @@ KBUILD_OPTIONS := MSM_EXT_DISPLAY_ROOT=$(MSM_EXT_DISPLAY_BLD_DIR) KBUILD_OPTIONS += MODNAME=msm_ext_display KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM) +########################################################### +include $(CLEAR_VARS) +# For incremental compilation +LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) +LOCAL_MODULE := msm-ext-disp-module-symvers +LOCAL_MODULE_STEM := Module.symvers +LOCAL_MODULE_KBUILD_NAME := Module.symvers +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) + +include $(DLKM_DIR)/Build_external_kernelmodule.mk ########################################################### include $(CLEAR_VARS) LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) From 5b5214916c3fe0f104bdf59f5ca61b93e928051f Mon Sep 17 00:00:00 2001 From: Jeykumar Sankaran Date: Fri, 1 Apr 2022 19:07:41 -0700 Subject: [PATCH 006/166] mm-drivers: sync_fence: avoid compiling spec_fence driver for taro sync_fence driver is maintained in kernel SI for all the taro variants. Since the Display SI 3.0 is shared with taro dev SI variant, avoid compiling sync_fence as dlkm for taro target. 
Change-Id: Icc7990812256a42efad7a8945c08338f83ee0914 Signed-off-by: Jeykumar Sankaran --- Android.mk | 7 ++++++- mm_driver_board.mk | 15 +++++++++------ mm_driver_product.mk | 6 +++++- 3 files changed, 20 insertions(+), 8 deletions(-) diff --git a/Android.mk b/Android.mk index 5053e7d643..d9bbda84c5 100644 --- a/Android.mk +++ b/Android.mk @@ -1 +1,6 @@ -include $(call all-subdir-makefiles) +MM_DRIVER_PATH := $(call my-dir) +include $(MM_DRIVER_PATH)/msm_ext_display/Android.mk +ifneq ($(TARGET_BOARD_PLATFORM), taro) +include $(MM_DRIVER_PATH)/sync_fence/Android.mk +endif + diff --git a/mm_driver_board.mk b/mm_driver_board.mk index 4ee3326519..0563c64f97 100644 --- a/mm_driver_board.mk +++ b/mm_driver_board.mk @@ -2,11 +2,14 @@ ifneq ($(TARGET_BOARD_AUTO),true) ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true) - BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko \ - $(KERNEL_MODULES_OUT)/sync_fence.ko - BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko \ - $(KERNEL_MODULES_OUT)/sync_fence.ko - BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/msm_ext_display.ko \ - $(KERNEL_MODULES_OUT)/sync_fence.ko + BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko + BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko + BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/msm_ext_display.ko + + ifneq ($(TARGET_BOARD_PLATFORM), taro) + BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko + BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko + BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/sync_fence.ko + endif endif endif diff --git a/mm_driver_product.mk b/mm_driver_product.mk index 2f0db285b4..4d74d27bf4 100644 --- a/mm_driver_product.mk +++ b/mm_driver_product.mk @@ -1,3 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only -PRODUCT_PACKAGES += 
msm_ext_display.ko sync_fence.ko +PRODUCT_PACKAGES += msm_ext_display.ko + +ifneq ($(TARGET_BOARD_PLATFORM), taro) +PRODUCT_PACKAGES += sync_fence.ko +endif From 03173f7cc81b11d359203f7ece6f3e41e7b91dc7 Mon Sep 17 00:00:00 2001 From: Narendra Muppalla Date: Wed, 30 Mar 2022 12:14:59 -0700 Subject: [PATCH 007/166] mm-drivers: sync-fence: add changes to serialize fence operations This change acquires fence_lock to serialize the enable_sw signalling operation on dma_fence_array. It bails out safely if the bind operation is called twice on the spec fence. The error level for bind failure with invalid user fd is changed to warning as this case can be treated non fatal. Change-Id: I688cbc84ba3cfb49c54de9b5e1bf8a9ec9d8da3a Signed-off-by: Narendra Muppalla --- sync_fence/src/qcom_sync_file.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/sync_fence/src/qcom_sync_file.c b/sync_fence/src/qcom_sync_file.c index 3cb2178412..3c006cc35b 100644 --- a/sync_fence/src/qcom_sync_file.c +++ b/sync_fence/src/qcom_sync_file.c @@ -276,6 +276,13 @@ static int spec_sync_bind_array(struct fence_bind_data *sync_bind_info) ret = -EINVAL; goto end; } + + if (fence_array->fences) { + pr_err("fence array already populated, spec fd:%d status:%d flags:0x%x\n", + sync_bind_info->out_bind_fd, dma_fence_get_status(fence), fence->flags); + goto end; + } + num_fences = fence_array->num_fences; counter = num_fences; @@ -298,11 +305,12 @@ static int spec_sync_bind_array(struct fence_bind_data *sync_bind_info) goto out; } + spin_lock(fence->lock); fence_array->fences = fence_list; for (i = 0; i < num_fences; i++) { user_fence = sync_file_get_fence(user_fds[i]); if (!user_fence) { - pr_err("bind fences are invalid !! user_fd:%d out_bind_fd:%d\n", + pr_warn("bind fences are invalid !! 
user_fd:%d out_bind_fd:%d\n", user_fds[i], sync_bind_info->out_bind_fd); counter = i; ret = -EINVAL; @@ -314,6 +322,7 @@ static int spec_sync_bind_array(struct fence_bind_data *sync_bind_info) } clear_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags); + spin_unlock(fence->lock); dma_fence_enable_sw_signaling(&fence_array->base); clear_fence_array_tracker(false); @@ -327,6 +336,7 @@ bind_invalid: fence_array->fences = NULL; fence_array->num_fences = 0; dma_fence_set_error(fence, -EINVAL); + spin_unlock(fence->lock); dma_fence_signal(fence); clear_fence_array_tracker(false); } From 7deaa672388e7433d849acca267e517fbc8f69d6 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Fri, 4 Feb 2022 14:14:18 -0800 Subject: [PATCH 008/166] mm-drivers: sync: add api to wait for sync fence bind This change adds an api to check if a given speculative fence is bound. If fence is not bound, it will wait for the speculative fence ioctl to bind the fence, or else timeout. Change-Id: I9a86d09df410e89137264be47763ae39f06eea2b Signed-off-by: Ingrid Gallardo --- config/kalamammdrivers.conf | 1 + config/kalamammdriversconf.h | 1 + sync_fence/Kbuild | 2 ++ sync_fence/src/qcom_sync_file.c | 35 ++++++++++++++++++++++++++++++++- 4 files changed, 38 insertions(+), 1 deletion(-) diff --git a/config/kalamammdrivers.conf b/config/kalamammdrivers.conf index 3df22e75d5..4f932958dd 100644 --- a/config/kalamammdrivers.conf +++ b/config/kalamammdrivers.conf @@ -3,3 +3,4 @@ # Copyright (c) 2020, The Linux Foundation. All rights reserved. 
export CONFIG_MSM_EXT_DISPLAY=y +export CONFIG_QCOM_SPEC_SYNC=y diff --git a/config/kalamammdriversconf.h b/config/kalamammdriversconf.h index 26ca25d02f..59c3a05b66 100644 --- a/config/kalamammdriversconf.h +++ b/config/kalamammdriversconf.h @@ -5,3 +5,4 @@ */ #define CONFIG_MSM_EXT_DISPLAY 1 +#define CONFIG_QCOM_SPEC_SYNC 1 diff --git a/sync_fence/Kbuild b/sync_fence/Kbuild index 48cb10624b..b91ec8c93c 100644 --- a/sync_fence/Kbuild +++ b/sync_fence/Kbuild @@ -3,8 +3,10 @@ KDIR := $(TOP)/kernel_platform/msm-kernel LINUXINCLUDE += -I$(SYNC_FENCE_ROOT)sync_fence/include/ +ifdef CONFIG_QCOM_SPEC_SYNC obj-m += sync_fence.o sync_fence-y := src/qcom_sync_file.o CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" +endif diff --git a/sync_fence/src/qcom_sync_file.c b/sync_fence/src/qcom_sync_file.c index 3c006cc35b..b3ecf4eb1f 100644 --- a/sync_fence/src/qcom_sync_file.c +++ b/sync_fence/src/qcom_sync_file.c @@ -20,13 +20,13 @@ #include #include #include +#include #define CLASS_NAME "sync" #define DRV_NAME "spec_sync" #define DRV_VERSION 1 #define NAME_LEN 32 -#define SPEC_FENCE_FLAG_FENCE_ARRAY 0x10 /* user flags for debug */ #define FENCE_MIN 1 #define FENCE_MAX 32 @@ -44,6 +44,7 @@ struct sync_device { uint32_t version; struct mutex l_lock; struct list_head fence_array_list; + wait_queue_head_t wait_queue; }; struct fence_array_node { @@ -254,6 +255,34 @@ static int spec_sync_ioctl_create_fence(struct sync_device *obj, unsigned long _ return 0; } +int spec_sync_wait_bind_array(struct dma_fence_array *fence_array, u32 timeout_ms) +{ + int ret; + + /* Check if fence-array is a speculative fence */ + if (!fence_array || !test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &fence_array->base.flags)) { + pr_err("invalid fence!\n"); + return -EINVAL; + } else if (test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY_BOUND, &fence_array->base.flags)) { + /* This fence-array is already bound, just return success */ + return 0; + } + + /* Wait for the fence-array bind */ + ret = 
wait_event_timeout(sync_dev.wait_queue, + test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY_BOUND, &fence_array->base.flags), + msecs_to_jiffies(timeout_ms)); + if (!ret) { + pr_err("timed out waiting for bind fence-array %d\n", timeout_ms); + ret = -ETIMEDOUT; + } else { + ret = 0; + } + + return ret; +} +EXPORT_SYMBOL(spec_sync_wait_bind_array); + static int spec_sync_bind_array(struct fence_bind_data *sync_bind_info) { struct dma_fence_array *fence_array; @@ -328,6 +357,9 @@ static int spec_sync_bind_array(struct fence_bind_data *sync_bind_info) clear_fence_array_tracker(false); bind_invalid: + set_bit(SPEC_FENCE_FLAG_FENCE_ARRAY_BOUND, &fence_array->base.flags); + wake_up_all(&sync_dev.wait_queue); + if (ret) { for (i = counter - 1; i >= 0; i--) dma_fence_put(fence_array->fences[i]); @@ -434,6 +466,7 @@ static int spec_sync_register_device(void) mutex_init(&sync_dev.lock); mutex_init(&sync_dev.l_lock); INIT_LIST_HEAD(&sync_dev.fence_array_list); + init_waitqueue_head(&sync_dev.wait_queue); return 0; From 136755f1817b4a7ccfe08365215b82e2934b2e64 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Mon, 18 Apr 2022 20:47:21 -0700 Subject: [PATCH 009/166] mm-drivers: sync: resolve compilation of sync fence driver Sync fence driver setup only compiles if the CONFIG flag for sync driver is set, however, this requires config files of the parent folder to be included, which currently is not happening. Resolve this problem by including the parent mm-drivers config files. 
Change-Id: I812612b71003ed007d60c046dcef5bcbe09f6e7c Signed-off-by: Ingrid Gallardo --- sync_fence/Kbuild | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sync_fence/Kbuild b/sync_fence/Kbuild index b91ec8c93c..fd631a4348 100644 --- a/sync_fence/Kbuild +++ b/sync_fence/Kbuild @@ -2,6 +2,8 @@ KDIR := $(TOP)/kernel_platform/msm-kernel LINUXINCLUDE += -I$(SYNC_FENCE_ROOT)sync_fence/include/ +include $(SYNC_FENCE_ROOT)/config/kalamammdrivers.conf +LINUXINCLUDE += -include $(SYNC_FENCE_ROOT)/config/kalamammdriversconf.h ifdef CONFIG_QCOM_SPEC_SYNC obj-m += sync_fence.o From 77ae3f31f06e55407773e9546e607972164880d6 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Wed, 17 Nov 2021 17:16:04 -0800 Subject: [PATCH 010/166] mm-drivers: hw_fence: Add support for hw-fence driver This change adds support for the hw-fence driver that initialize, expose and manage the interfaces for the hw-fences, which are the synchronization primitives to allow the hardware to hardware signalization of the fences for the frame buffers shared between gpu and display hw-cores. 
Change-Id: If2313585d5a9f3ac90e16aad3464600641a6fa04 Signed-off-by: Ingrid Gallardo --- Android.mk | 1 + config/kalamammdrivers.conf | 1 + config/kalamammdriversconf.h | 1 + hw_fence/Android.mk | 41 + hw_fence/Kbuild | 18 + hw_fence/Makefile | 14 + hw_fence/include/hw_fence_drv_debug.h | 61 ++ hw_fence/include/hw_fence_drv_ipc.h | 90 ++ hw_fence/include/hw_fence_drv_priv.h | 386 ++++++++ hw_fence/include/hw_fence_drv_utils.h | 113 +++ hw_fence/src/hw_fence_drv_debug.c | 1000 +++++++++++++++++++ hw_fence/src/hw_fence_drv_ipc.c | 247 +++++ hw_fence/src/hw_fence_drv_priv.c | 1317 +++++++++++++++++++++++++ hw_fence/src/hw_fence_drv_utils.c | 644 ++++++++++++ hw_fence/src/msm_hw_fence.c | 486 +++++++++ mm_driver_board.mk | 9 +- mm_driver_product.mk | 2 +- 17 files changed, 4427 insertions(+), 4 deletions(-) create mode 100644 hw_fence/Android.mk create mode 100644 hw_fence/Kbuild create mode 100644 hw_fence/Makefile create mode 100644 hw_fence/include/hw_fence_drv_debug.h create mode 100644 hw_fence/include/hw_fence_drv_ipc.h create mode 100644 hw_fence/include/hw_fence_drv_priv.h create mode 100644 hw_fence/include/hw_fence_drv_utils.h create mode 100644 hw_fence/src/hw_fence_drv_debug.c create mode 100644 hw_fence/src/hw_fence_drv_ipc.c create mode 100644 hw_fence/src/hw_fence_drv_priv.c create mode 100644 hw_fence/src/hw_fence_drv_utils.c create mode 100644 hw_fence/src/msm_hw_fence.c diff --git a/Android.mk b/Android.mk index d9bbda84c5..c703795324 100644 --- a/Android.mk +++ b/Android.mk @@ -1,5 +1,6 @@ MM_DRIVER_PATH := $(call my-dir) include $(MM_DRIVER_PATH)/msm_ext_display/Android.mk +include $(MM_DRIVER_PATH)/hw_fence/Android.mk ifneq ($(TARGET_BOARD_PLATFORM), taro) include $(MM_DRIVER_PATH)/sync_fence/Android.mk endif diff --git a/config/kalamammdrivers.conf b/config/kalamammdrivers.conf index 4f932958dd..4e657d38be 100644 --- a/config/kalamammdrivers.conf +++ b/config/kalamammdrivers.conf @@ -4,3 +4,4 @@ export CONFIG_MSM_EXT_DISPLAY=y export 
CONFIG_QCOM_SPEC_SYNC=y +export CONFIG_QTI_HW_FENCE=y diff --git a/config/kalamammdriversconf.h b/config/kalamammdriversconf.h index 59c3a05b66..b9cb331bda 100644 --- a/config/kalamammdriversconf.h +++ b/config/kalamammdriversconf.h @@ -6,3 +6,4 @@ #define CONFIG_MSM_EXT_DISPLAY 1 #define CONFIG_QCOM_SPEC_SYNC 1 +#define CONFIG_QTI_HW_FENCE 1 diff --git a/hw_fence/Android.mk b/hw_fence/Android.mk new file mode 100644 index 0000000000..bad9f10b96 --- /dev/null +++ b/hw_fence/Android.mk @@ -0,0 +1,41 @@ +LOCAL_PATH := $(call my-dir) +include $(CLEAR_VARS) + +# This makefile is only for DLKM +ifneq ($(findstring vendor,$(LOCAL_PATH)),) + +ifneq ($(findstring opensource,$(LOCAL_PATH)),) + MSM_HW_FENCE_BLD_DIR := $(TOP)/vendor/qcom/opensource/mm-drivers/hw_fence +endif # opensource + +DLKM_DIR := $(TOP)/device/qcom/common/dlkm + +LOCAL_ADDITIONAL_DEPENDENCIES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) + +########################################################### +# This is set once per LOCAL_PATH, not per (kernel) module +KBUILD_OPTIONS := MSM_HW_FENCE_ROOT=$(MSM_HW_FENCE_BLD_DIR) +KBUILD_OPTIONS += MODNAME=msm_hw_fence +KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM) + +########################################################### +include $(CLEAR_VARS) +LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) +LOCAL_MODULE := hw-fence-module-symvers +LOCAL_MODULE_STEM := Module.symvers +LOCAL_MODULE_KBUILD_NAME := Module.symvers +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) + +include $(DLKM_DIR)/Build_external_kernelmodule.mk +########################################################### +include $(CLEAR_VARS) +LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) +LOCAL_MODULE := msm_hw_fence.ko +LOCAL_MODULE_KBUILD_NAME := msm_hw_fence.ko +LOCAL_MODULE_TAGS := optional +LOCAL_MODULE_DEBUG_ENABLE := true +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) + +include 
$(DLKM_DIR)/Build_external_kernelmodule.mk +########################################################### +endif # DLKM check diff --git a/hw_fence/Kbuild b/hw_fence/Kbuild new file mode 100644 index 0000000000..fcd6b6e7bb --- /dev/null +++ b/hw_fence/Kbuild @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0-only + +KDIR := $(TOP)/kernel_platform/msm-kernel +include $(MSM_HW_FENCE_ROOT)/config/kalamammdrivers.conf +LINUXINCLUDE += -include $(MSM_HW_FENCE_ROOT)/config/kalamammdriversconf.h \ + -I$(MSM_HW_FENCE_ROOT)hw_fence/include/ + +ifdef CONFIG_QTI_HW_FENCE +obj-m += msm_hw_fence.o + +msm_hw_fence-y := src/msm_hw_fence.o \ + src/hw_fence_drv_priv.o \ + src/hw_fence_drv_utils.o \ + src/hw_fence_drv_debug.o \ + src/hw_fence_drv_ipc.o + +CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" +endif diff --git a/hw_fence/Makefile b/hw_fence/Makefile new file mode 100644 index 0000000000..ac6afd73be --- /dev/null +++ b/hw_fence/Makefile @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0-only +KBUILD_OPTIONS += MSM_HW_FENCE_ROOT=$(KERNEL_SRC)/$(M)/../ + +all: modules + +modules_install: + $(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install + +%: + $(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS) + +clean: + rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers + rm -rf .tmp_versions diff --git a/hw_fence/include/hw_fence_drv_debug.h b/hw_fence/include/hw_fence_drv_debug.h new file mode 100644 index 0000000000..d980331113 --- /dev/null +++ b/hw_fence/include/hw_fence_drv_debug.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef __HW_FENCE_DRV_DEBUG +#define __HW_FENCE_DRV_DEBUG + +enum hw_fence_drv_prio { + HW_FENCE_HIGH = 0x000001, /* High density debug messages (noisy) */ + HW_FENCE_LOW = 0x000002, /* Low density debug messages */ + HW_FENCE_INFO = 0x000004, /* Informational prints */ + HW_FENCE_INIT = 0x00008, /* Initialization logs */ + HW_FENCE_QUEUE = 0x000010, /* Queue logs */ + HW_FENCE_LUT = 0x000020, /* Look-up and algorithm logs */ + HW_FENCE_IRQ = 0x000040, /* Interrupt-related messages */ + HW_FENCE_PRINTK = 0x010000, +}; + +extern u32 msm_hw_fence_debug_level; + +#define dprintk(__level, __fmt, ...) \ + do { \ + if (msm_hw_fence_debug_level & __level) \ + if (msm_hw_fence_debug_level & HW_FENCE_PRINTK) \ + pr_err(__fmt, ##__VA_ARGS__); \ + } while (0) + + +#define HWFNC_ERR(fmt, ...) \ + pr_err("[hwfence:%s:%d][err][%pS] "fmt, __func__, __LINE__, \ + __builtin_return_address(0), ##__VA_ARGS__) + +#define HWFNC_DBG_H(fmt, ...) \ + dprintk(HW_FENCE_HIGH, "[hwfence:%s:%d][dbgh]"fmt, __func__, __LINE__, ##__VA_ARGS__) + +#define HWFNC_DBG_L(fmt, ...) \ + dprintk(HW_FENCE_LOW, "[hwfence:%s:%d][dbgl]"fmt, __func__, __LINE__, ##__VA_ARGS__) + +#define HWFNC_DBG_INFO(fmt, ...) \ + dprintk(HW_FENCE_INFO, "[hwfence:%s:%d][dbgi]"fmt, __func__, __LINE__, ##__VA_ARGS__) + +#define HWFNC_DBG_INIT(fmt, ...) \ + dprintk(HW_FENCE_INIT, "[hwfence:%s:%d][dbg]"fmt, __func__, __LINE__, ##__VA_ARGS__) + +#define HWFNC_DBG_Q(fmt, ...) \ + dprintk(HW_FENCE_QUEUE, "[hwfence:%s:%d][dbgq]"fmt, __func__, __LINE__, ##__VA_ARGS__) + +#define HWFNC_DBG_LUT(fmt, ...) \ + dprintk(HW_FENCE_LUT, "[hwfence:%s:%d][dbglut]"fmt, __func__, __LINE__, ##__VA_ARGS__) + +#define HWFNC_DBG_IRQ(fmt, ...) \ + dprintk(HW_FENCE_IRQ, "[hwfence:%s:%d][dbgirq]"fmt, __func__, __LINE__, ##__VA_ARGS__) + +#define HWFNC_WARN(fmt, ...) 
\ + pr_warn("[hwfence:%s:%d][warn][%pS] "fmt, __func__, __LINE__, \ + __builtin_return_address(0), ##__VA_ARGS__) + +int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data); + +#endif /* __HW_FENCE_DRV_DEBUG */ diff --git a/hw_fence/include/hw_fence_drv_ipc.h b/hw_fence/include/hw_fence_drv_ipc.h new file mode 100644 index 0000000000..c24781ac36 --- /dev/null +++ b/hw_fence/include/hw_fence_drv_ipc.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __HW_FENCE_DRV_IPC_H +#define __HW_FENCE_DRV_IPC_H + +#define HW_FENCE_IPC_CLIENT_ID_APPS 8 +#define HW_FENCE_IPC_CLIENT_ID_GPU 9 +#define HW_FENCE_IPC_CLIENT_ID_DPU 25 + +#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_LAHAINA 2 +#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_WAIPIO 1 +#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_KAILUA 2 + +#define HW_FENCE_IPCC_HW_REV_100 0x00010000 /* Lahaina */ +#define HW_FENCE_IPCC_HW_REV_110 0x00010100 /* Waipio */ +#define HW_FENCE_IPCC_HW_REV_170 0x00010700 /* Kailua */ + +#define IPC_PROTOCOLp_CLIENTc_VERSION(base, p, c) (base + (0x40000*p) + (0x1000*c)) +#define IPC_PROTOCOLp_CLIENTc_CONFIG(base, p, c) (base + 0x8 + (0x40000*p) + (0x1000*c)) +#define IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_ENABLE(base, p, c) \ + (base + 0x14 + (0x40000*p) + (0x1000*c)) +#define IPC_PROTOCOLp_CLIENTc_SEND(base, p, c) ((base + 0xc) + (0x40000*p) + (0x1000*c)) + +/** + * hw_fence_ipcc_trigger_signal() - Trigger ipc signal for the requested client/signal pair. + * @drv_data: driver data. + * @tx_client_id: ipc client id that sends the ipc signal. + * @rx_client_id: ipc client id that receives the ipc signal. + * @signal_id: signal id to send. 
+ * + * This API triggers the ipc 'signal_id' from the 'tx_client_id' to the 'rx_client_id' + */ +void hw_fence_ipcc_trigger_signal(struct hw_fence_driver_data *drv_data, + u32 tx_client_id, u32 rx_client_id, u32 signal_id); + +/** + * hw_fence_ipcc_enable_signaling() - Enable ipcc signaling for hw-fence driver. + * @drv_data: driver data. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data); + +#ifdef HW_DPU_IPCC +/** + * hw_fence_ipcc_enable_dpu_signaling() - Enable ipcc signaling for dpu client. + * @drv_data: driver data. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data); +#endif /* HW_DPU_IPCC */ + +/** + * hw_fence_ipcc_get_client_id() - Returns the ipc client id that corresponds to the hw fence + * driver client. + * @drv_data: driver data. + * @client_id: hw fence driver client id. + * + * The ipc client id returned by this API is used by the hw fence driver when signaling the fence. + * + * Return: client_id on success or negative errno (-EINVAL) + */ +int hw_fence_ipcc_get_client_id(struct hw_fence_driver_data *drv_data, u32 client_id); + +/** + * hw_fence_ipcc_get_signal_id() - Returns the ipc signal id that corresponds to the hw fence + * driver client. + * @drv_data: driver data. + * @client_id: hw fence driver client id. + * + * The ipc signal id returned by this API is used by the hw fence driver when signaling the fence. + * + * Return: client_id on success or negative errno (-EINVAL) + */ +int hw_fence_ipcc_get_signal_id(struct hw_fence_driver_data *drv_data, u32 client_id); + +/** + * hw_fence_ipcc_needs_rxq_update() - Returns bool to indicate if client uses rx-queue. + * @drv_data: driver data. + * @client_id: hw fence driver client id. 
+ * + * Return: true if client needs to update rxq, false otherwise + */ +bool hw_fence_ipcc_needs_rxq_update(struct hw_fence_driver_data *drv_data, int client_id); + +#endif /* __HW_FENCE_DRV_IPC_H */ diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h new file mode 100644 index 0000000000..e15fd4159c --- /dev/null +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -0,0 +1,386 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __HW_FENCE_DRV_INTERNAL_H +#define __HW_FENCE_DRV_INTERNAL_H + +#include +#include +#include +#include +#include +#include +#include + +/* Add define only for platforms that support IPCC in dpu-hw */ +#define HW_DPU_IPCC 1 + +/* max u64 to indicate invalid fence */ +#define HW_FENCE_INVALID_PARENT_FENCE (~0ULL) + +/* hash algorithm constants */ +#define HW_FENCE_HASH_A_MULT 4969 /* a multiplier for Hash algorithm */ +#define HW_FENCE_HASH_C_MULT 907 /* c multiplier for Hash algorithm */ + +/* number of queues per type (i.e. 
ctrl or client queues) */ +#define HW_FENCE_CTRL_QUEUES 2 /* Rx and Tx Queues */ +#define HW_FENCE_CLIENT_QUEUES 2 /* Rx and Tx Queues */ + +/* hfi headers calculation */ +#define HW_FENCE_HFI_TABLE_HEADER_SIZE (sizeof(struct msm_hw_fence_hfi_queue_table_header)) +#define HW_FENCE_HFI_QUEUE_HEADER_SIZE (sizeof(struct msm_hw_fence_hfi_queue_header)) + +#define HW_FENCE_HFI_CTRL_HEADERS_SIZE (HW_FENCE_HFI_TABLE_HEADER_SIZE + \ + (HW_FENCE_HFI_QUEUE_HEADER_SIZE * HW_FENCE_CTRL_QUEUES)) + +#define HW_FENCE_HFI_CLIENT_HEADERS_SIZE (HW_FENCE_HFI_TABLE_HEADER_SIZE + \ + (HW_FENCE_HFI_QUEUE_HEADER_SIZE * HW_FENCE_CLIENT_QUEUES)) + +/* + * Max Payload size is the bigest size of the message that we can have in the CTRL queue + * in this case the max message is calculated like following, using 32-bits elements: + * 1 header + 1 msg-type + 1 client_id + 2 hash + 1 error + */ +#define HW_FENCE_CTRL_QUEUE_MAX_PAYLOAD_SIZE ((1 + 1 + 1 + 2 + 1) * sizeof(u32)) + +#define HW_FENCE_CTRL_QUEUE_PAYLOAD HW_FENCE_CTRL_QUEUE_MAX_PAYLOAD_SIZE +#define HW_FENCE_CLIENT_QUEUE_PAYLOAD (sizeof(struct msm_hw_fence_queue_payload)) + +/* Locks area for all the clients */ +#define HW_FENCE_MEM_LOCKS_SIZE (sizeof(u64) * (HW_FENCE_CLIENT_MAX - 1)) + +#define HW_FENCE_TX_QUEUE 1 +#define HW_FENCE_RX_QUEUE 2 + +/* ClientID for the internal join fence, this is used by the framework when creating a join-fence */ +#define HW_FENCE_JOIN_FENCE_CLIENT_ID (~(u32)0) + +/** + * msm hw fence flags: + * MSM_HW_FENCE_FLAG_SIGNAL - Flag set when the hw-fence is signaled + */ +#define MSM_HW_FENCE_FLAG_SIGNAL BIT(0) + +/** + * MSM_HW_FENCE_MAX_JOIN_PARENTS: + * Maximum number of parents that a fence can have for a join-fence + */ +#define MSM_HW_FENCE_MAX_JOIN_PARENTS 3 + +enum hw_fence_lookup_ops { + HW_FENCE_LOOKUP_OP_CREATE = 0x1, + HW_FENCE_LOOKUP_OP_DESTROY, + HW_FENCE_LOOKUP_OP_CREATE_JOIN, + HW_FENCE_LOOKUP_OP_FIND_FENCE +}; + +/** + * enum hw_fence_loopback_id - Enum with the clients having a loopback signal 
(i.e AP to AP signal). + * HW_FENCE_LOOPBACK_DPU_CTL_0: dpu client 0. Used in platforms with no dpu-ipc. + * HW_FENCE_LOOPBACK_DPU_CTL_1: dpu client 1. Used in platforms with no dpu-ipc. + * HW_FENCE_LOOPBACK_DPU_CTL_2: dpu client 2. Used in platforms with no dpu-ipc. + * HW_FENCE_LOOPBACK_DPU_CTL_3: dpu client 3. Used in platforms with no dpu-ipc. + * HW_FENCE_LOOPBACK_DPU_CTL_4: dpu client 4. Used in platforms with no dpu-ipc. + * HW_FENCE_LOOPBACK_DPU_CTL_5: dpu client 5. Used in platforms with no dpu-ipc. + * HW_FENCE_LOOPBACK_DPU_CTX_0: gfx client 0. Used in platforms with no gmu support. + */ +enum hw_fence_loopback_id { + HW_FENCE_LOOPBACK_DPU_CTL_0, + HW_FENCE_LOOPBACK_DPU_CTL_1, + HW_FENCE_LOOPBACK_DPU_CTL_2, + HW_FENCE_LOOPBACK_DPU_CTL_3, + HW_FENCE_LOOPBACK_DPU_CTL_4, + HW_FENCE_LOOPBACK_DPU_CTL_5, + HW_FENCE_LOOPBACK_GFX_CTX_0, + HW_FENCE_LOOPBACK_MAX, +}; + +#define HW_FENCE_MAX_DPU_LOOPBACK_CLIENTS (HW_FENCE_LOOPBACK_DPU_CTL_5 + 1) + +/** + * struct msm_hw_fence_queue - Structure holding the data of the hw fence queues. + * @va_queue: pointer to the virtual address of the queue elements + * @q_size_bytes: size of the queue + * @va_header: pointer to the hfi header virtual address + * @pa_queue: physical address of the queue + */ +struct msm_hw_fence_queue { + void *va_queue; + u32 q_size_bytes; + void *va_header; + phys_addr_t pa_queue; +}; + +/** + * struct msm_hw_fence_client - Structure holding the per-Client allocated resources. 
+ * @client_id: id of the client + * @mem_descriptor: hfi header memory descriptor + * @queues: queues descriptor + * @ipc_signal_id: id of the signal to be triggered for this client + * @ipc_client_id: id of the ipc client for this hw fence driver client + * @update_rxq: bool to indicate if client uses rx-queue + */ +struct msm_hw_fence_client { + enum hw_fence_client_id client_id; + struct msm_hw_fence_mem_addr mem_descriptor; + struct msm_hw_fence_queue queues[HW_FENCE_CLIENT_QUEUES]; + int ipc_signal_id; + int ipc_client_id; + bool update_rxq; +}; + +/** + * struct msm_hw_fence_mem_data - Structure holding internal memory attributes + * + * @attrs: attributes for the memory allocation + */ +struct msm_hw_fence_mem_data { + unsigned long attrs; +}; + +/** + * struct msm_hw_fence_dbg_data - Structure holding debugfs data + * + * @root: debugfs root + * @entry_rd: flag to indicate if debugfs dumps a single line or table + * @context_rd: debugfs setting to indicate which context id to dump + * @seqno_rd: debugfs setting to indicate which seqno to dump + * @hw_fence_sim_release_delay: delay in micro seconds for the debugfs node that simulates the + * hw-fences behavior, to release the hw-fences + * @create_hw_fences: boolean to continuosly create hw-fences within debugfs + * @clients_list: list of debug clients registered + * @clients_list_lock: lock to synchronize access to the clients list + */ +struct msm_hw_fence_dbg_data { + struct dentry *root; + + bool entry_rd; + u64 context_rd; + u64 seqno_rd; + + u32 hw_fence_sim_release_delay; + bool create_hw_fences; + + struct list_head clients_list; + struct mutex clients_list_lock; +}; + +/** + * struct hw_fence_driver_data - Structure holding internal hw-fence driver data + * + * @dev: device driver pointer + * @resources_ready: value set by driver at end of probe, once all resources are ready + * @hw_fence_table_entries: total number of hw-fences in the global table + * @hw_fence_mem_fences_table_size: hw-fences 
global table total size + * @hw_fence_queue_entries: total number of entries that can be available in the queue + * @hw_fence_ctrl_queue_size: size of the ctrl queue for the payload + * @hw_fence_mem_ctrl_queues_size: total size of ctrl queues, including: header + rxq + txq + * @hw_fence_client_queue_size: size of the client queue for the payload + * @hw_fence_mem_clients_queues_size: total size of client queues, including: header + rxq + txq + * @hw_fences_tbl: pointer to the hw-fences table + * @hw_fences_tbl_cnt: number of elements in the hw-fence table + * @client_lock_tbl: pointer to the per-client locks table + * @client_lock_tbl_cnt: number of elements in the locks table + * @hw_fences_mem_desc: memory descriptor for the hw-fence table + * @clients_locks_mem_desc: memory descriptor for the locks table + * @ctrl_queue_mem_desc: memory descriptor for the ctrl queues + * @ctrl_queues: pointer to the ctrl queues + * @io_mem_base: pointer to the carved-out io memory + * @res: resources for the carved out memory + * @size: size of the carved-out memory + * @label: label for the carved-out memory (this is used by SVM to find the memory) + * @peer_name: peer name for this carved-out memory + * @rm_nb: hyp resource manager notifier + * @memparcel: memparcel for the allocated memory + * @db_label: doorbell label + * @rx_dbl: handle to the Rx doorbell + * @debugfs_data: debugfs info + * @ipcc_reg_base: base for ipcc regs mapping + * @ipcc_io_mem: base for the ipcc io mem map + * @ipcc_size: size of the ipcc io mem mapping + * @protocol_id: ipcc protocol id used by this driver + * @ipcc_client_id: ipcc client id for this driver + * @ipc_clients_table: table with the ipcc mapping for each client of this driver + * @qtime_reg_base: qtimer register base address + * @qtime_io_mem: qtimer io mem map + * @qtime_size: qtimer io mem map size + * @ctl_start_ptr: pointer to the ctl_start registers of the display hw (platforms with no dpu-ipc) + * @ctl_start_size: size of the 
ctl_start registers of the display hw (platforms with no dpu-ipc) + * @client_id_mask: bitmask for tracking registered client_ids + * @clients_mask_lock: lock to synchronize access to the clients mask + * @msm_hw_fence_client: table with the handles of the registered clients + * @ipcc_dpu_initialized: flag to indicate if dpu hw is initialized + */ +struct hw_fence_driver_data { + + struct device *dev; + bool resources_ready; + + /* Table & Queues info */ + u32 hw_fence_table_entries; + u32 hw_fence_mem_fences_table_size; + u32 hw_fence_queue_entries; + /* ctrl queues */ + u32 hw_fence_ctrl_queue_size; + u32 hw_fence_mem_ctrl_queues_size; + /* client queues */ + u32 hw_fence_client_queue_size; + u32 hw_fence_mem_clients_queues_size; + + /* HW Fences Table VA */ + struct msm_hw_fence *hw_fences_tbl; + u32 hw_fences_tbl_cnt; + + /* Table with a Per-Client Lock */ + u64 *client_lock_tbl; + u32 client_lock_tbl_cnt; + + /* Memory Descriptors */ + struct msm_hw_fence_mem_addr hw_fences_mem_desc; + struct msm_hw_fence_mem_addr clients_locks_mem_desc; + struct msm_hw_fence_mem_addr ctrl_queue_mem_desc; + struct msm_hw_fence_queue ctrl_queues[HW_FENCE_CTRL_QUEUES]; + + /* carved out memory */ + void __iomem *io_mem_base; + struct resource res; + size_t size; + u32 label; + u32 peer_name; + struct notifier_block rm_nb; + u32 memparcel; + + /* doorbell */ + u32 db_label; + + /* VM virq */ + void *rx_dbl; + + /* debugfs */ + struct msm_hw_fence_dbg_data debugfs_data; + + /* ipcc regs */ + phys_addr_t ipcc_reg_base; + void __iomem *ipcc_io_mem; + uint32_t ipcc_size; + u32 protocol_id; + u32 ipcc_client_id; + + /* table with mapping of ipc client for each hw-fence client */ + struct hw_fence_client_ipc_map *ipc_clients_table; + + /* qtime reg */ + phys_addr_t qtime_reg_base; + void __iomem *qtime_io_mem; + uint32_t qtime_size; + + /* base address for dpu ctl start regs */ + void *ctl_start_ptr[HW_FENCE_MAX_DPU_LOOPBACK_CLIENTS]; + uint32_t 
ctl_start_size[HW_FENCE_MAX_DPU_LOOPBACK_CLIENTS]; + + /* bitmask for tracking registered client_ids */ + u64 client_id_mask; + struct mutex clients_mask_lock; + + /* table with registered client handles */ + struct msm_hw_fence_client *clients[HW_FENCE_CLIENT_MAX]; +#ifdef HW_DPU_IPCC + /* state variables */ + bool ipcc_dpu_initialized; +#endif /* HW_DPU_IPCC */ +}; + +/** + * struct msm_hw_fence_queue_payload - hardware fence clients queues payload. + * @ctxt_id: context id of the dma fence + * @seqno: sequence number of the dma fence + * @hash: fence hash + * @flags: see MSM_HW_FENCE_FLAG_* flags descriptions + * @error: error code for this fence, fence controller receives this + * error from the signaling client through the tx queue and + * propagates the error to the waiting client through rx queue + */ +struct msm_hw_fence_queue_payload { + u64 ctxt_id; + u64 seqno; + u64 hash; + u64 flags; + u32 error; + u32 unused; /* align to 64-bit */ +}; + +/** + * struct msm_hw_fence - structure holding each hw fence data. + * @valid: field updated when a hw-fence is reserved. True if hw-fence is in use + * @error: field to hold a hw-fence error + * @ctx_id: context id + * @seq_id: sequence id + * @wait_client_mask: bitmask holding the waiting-clients of the fence + * @fence_allocator: field to indicate the client_id that reserved the fence + * @fence_signal-client: + * @lock: this field is required to share information between the Driver & Driver || + * Driver & FenceCTL. Needs to be 64-bit atomic inter-processor lock. + * @flags: field to indicate the state of the fence + * @parent_list: list of indexes with the parents for a child-fence in a join-fence + * @parent_cnt: total number of parents for a child-fence in a join-fence + * @pending_child_cnt: children refcount for a parent-fence in a join-fence. 
Access must be atomic + * or locked + * @fence_create_time: debug info with the create time timestamp + * @fence_trigger_time: debug info with the trigger time timestamp + * @fence_wait_time: debug info with the register-for-wait timestamp + * @debug_refcount: refcount used for debugging + */ +struct msm_hw_fence { + u32 valid; + u32 error; + u64 ctx_id; + u64 seq_id; + u64 wait_client_mask; + u32 fence_allocator; + u32 fence_signal_client; + u64 lock; /* Datatype must be 64-bit. */ + u64 flags; + u64 parent_list[MSM_HW_FENCE_MAX_JOIN_PARENTS]; + u32 parents_cnt; + u32 pending_child_cnt; + u64 fence_create_time; + u64 fence_trigger_time; + u64 fence_wait_time; + u64 debug_refcount; +}; + +int hw_fence_init(struct hw_fence_driver_data *drv_data); +int hw_fence_alloc_client_resources(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, + struct msm_hw_fence_mem_addr *mem_descriptor); +int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client); +int hw_fence_init_controller_resources(struct msm_hw_fence_client *hw_fence_client); +void hw_fence_cleanup_client(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client); +int hw_fence_create(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, + u64 context, u64 seqno, u64 *hash); +int hw_fence_destroy(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, + u64 context, u64 seqno); +int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, + struct dma_fence_array *array); +int hw_fence_process_fence(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence); +int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, u64 seqno, u64 hash, + u64 flags, u32 error, 
int queue_type); +inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data); +int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, + struct msm_hw_fence_queue_payload *payload, int queue_type); +int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno); +struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, + u64 context, u64 seqno, u64 *hash); + +#endif /* __HW_FENCE_DRV_INTERNAL_H */ diff --git a/hw_fence/include/hw_fence_drv_utils.h b/hw_fence/include/hw_fence_drv_utils.h new file mode 100644 index 0000000000..092bb625cf --- /dev/null +++ b/hw_fence/include/hw_fence_drv_utils.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __HW_FENCE_DRV_UTILS_H +#define __HW_FENCE_DRV_UTILS_H + +/** + * enum hw_fence_mem_reserve - Types of reservations for the carved-out memory. + * HW_FENCE_MEM_RESERVE_CTRL_QUEUE: Reserve memory for the ctrl rx/tx queues. + * HW_FENCE_MEM_RESERVE_LOCKS_REGION: Reserve memory for the per-client locks memory region. + * HW_FENCE_MEM_RESERVE_TABLE: Reserve memory for the hw-fences global table. + * HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: Reserve memory per-client for the rx/tx queues. + */ +enum hw_fence_mem_reserve { + HW_FENCE_MEM_RESERVE_CTRL_QUEUE, + HW_FENCE_MEM_RESERVE_LOCKS_REGION, + HW_FENCE_MEM_RESERVE_TABLE, + HW_FENCE_MEM_RESERVE_CLIENT_QUEUE +}; + +/** + * global_atomic_store() - Inter-processor lock + * @lock: memory to lock + * @val: if true, api locks the memory, if false it unlocks the memory + */ +void global_atomic_store(uint64_t *lock, bool val); + +/** + * hw_fence_utils_init_virq() - Initialilze doorbell (i.e. 
vIRQ) for SVM to HLOS signaling + * @drv_data: hw fence driver data + * + * Returns zero if success, otherwise returns negative error code. + */ +int hw_fence_utils_init_virq(struct hw_fence_driver_data *drv_data); + +/** + * hw_fence_utils_process_doorbell_mask() - Sends doorbell mask to process the signaled clients + * this API is only exported for simulation purposes. + * @drv_data: hw fence driver data. + * @db_flags: doorbell flag + */ +void hw_fence_utils_process_doorbell_mask(struct hw_fence_driver_data *drv_data, u64 db_flags); + +/** + * hw_fence_utils_alloc_mem() - Allocates the carved-out memory pool that will be used for the HW + * Fence global table, locks and queues. + * @hw_fence_drv_data: hw fence driver data + * + * Returns zero if success, otherwise returns negative error code. + */ +int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *hw_fence_drv_data); + +/** + * hw_fence_utils_reserve_mem() - Reserves memory from the carved-out memory pool. + * @drv_data: hw fence driver data. + * @type: memory reservation type. + * @phys: physical address of the carved-out memory pool + * + * Returns zero if success, otherwise returns negative error code. + */ +int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data, + enum hw_fence_mem_reserve type, phys_addr_t *phys, void **pa, u32 *size, int client_id); + +/** + * hw_fence_utils_parse_dt_props() - Init dt properties + * @drv_data: hw fence driver data + * + * Returns zero if success, otherwise returns negative error code. + */ +int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data); + +/** + * hw_fence_utils_map_ipcc() - Maps IPCC registers and enable signaling + * @drv_data: hw fence driver data + * + * Returns zero if success, otherwise returns negative error code. 
+ */ +int hw_fence_utils_map_ipcc(struct hw_fence_driver_data *drv_data); + +/** + * hw_fence_utils_map_qtime() - Maps qtime register + * @drv_data: hw fence driver data + * + * Returns zero if success, otherwise returns negative error code. + */ +int hw_fence_utils_map_qtime(struct hw_fence_driver_data *drv_data); + +/** + * hw_fence_utils_map_ctl_start() - Maps ctl_start registers from dpu hw + * @drv_data: hw fence driver data + * + * Returns zero if success, otherwise returns negative error code. This API is only used + * for simulation purposes in platforms where dpu does not support ipc signal. + */ +int hw_fence_utils_map_ctl_start(struct hw_fence_driver_data *drv_data); + +/** + * hw_fence_utils_cleanup_fence() - Cleanup the hw-fence from a specified client + * @drv_data: hw fence driver data + * @hw_fence_client: client, for which the fence must be cleared + * @hw_fence: hw-fence to cleanup + * @hash: hash of the hw-fence to cleanup + * @reset_flags: flags to determine how to handle the reset + * + * Returns zero if success, otherwise returns negative error code. + */ +int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash, + u32 reset_flags); + +#endif /* __HW_FENCE_DRV_UTILS_H */ diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c new file mode 100644 index 0000000000..c047a3b251 --- /dev/null +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -0,0 +1,1000 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include + +#include "hw_fence_drv_priv.h" +#include "hw_fence_drv_debug.h" +#include "hw_fence_drv_ipc.h" +#include "hw_fence_drv_utils.h" + +#define HW_FENCE_NAME_SIZE 64 +#define HW_FENCE_DEBUG_MAX_LOOPS 200 + +u32 msm_hw_fence_debug_level = HW_FENCE_PRINTK; + +/** + * struct client_data - Structure holding the data of the debug clients. + * + * @client_id: client id. + * @dma_context: context id to create the dma-fences for the client. + * @seqno_cnt: sequence number, this is a counter to simulate the seqno for debugging. + * @client_handle: handle for the client, this is returned by the hw-fence driver after + * a successful registration of the client. + * @mem_descriptor: memory descriptor for the client-queues. This is populated by the hw-fence + * driver after a successful registration of the client. + * @list: client node. + */ +struct client_data { + int client_id; + u64 dma_context; + u64 seqno_cnt; + void *client_handle; + struct msm_hw_fence_mem_addr mem_descriptor; + struct list_head list; +}; + +/** + * struct hw_dma_fence - fences created by hw-fence for debugging. + * @base: base dma-fence structure, this must remain at beginning of the struct. + * @name: name of each fence. + * @client_handle: handle for the client owner of this fence, this is returned by the hw-fence + * driver after a successful registration of the client and used by this fence + * during release. 
+ */ +struct hw_dma_fence { + struct dma_fence base; + char name[HW_FENCE_NAME_SIZE]; + void *client_handle; +}; + +#if IS_ENABLED(CONFIG_DEBUG_FS) +static int _get_debugfs_input_client(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos, + struct hw_fence_driver_data **drv_data) +{ + char buf[10]; + int client_id; + + if (!file || !file->private_data) { + HWFNC_ERR("unexpected data %d\n", !file); + return -EINVAL; + } + *drv_data = file->private_data; + + if (count >= sizeof(buf)) + return -EFAULT; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + buf[count] = 0; /* end of string */ + + if (kstrtouint(buf, 0, &client_id)) + return -EFAULT; + + if (client_id < HW_FENCE_CLIENT_ID_CTX0 || client_id >= HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("invalid client_id:%d min:%d max:%d\n", client_id, + HW_FENCE_CLIENT_ID_CTX0, HW_FENCE_CLIENT_MAX); + return -EINVAL; + } + + return client_id; +} + +static int _debugfs_ipcc_trigger(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos, u32 tx_client, u32 rx_client) +{ + struct hw_fence_driver_data *drv_data; + int client_id, signal_id; + + client_id = _get_debugfs_input_client(file, user_buf, count, ppos, &drv_data); + if (client_id < 0) + return -EINVAL; + + /* Get signal-id that hw-fence driver would trigger for this client */ + signal_id = hw_fence_ipcc_get_signal_id(drv_data, client_id); + if (signal_id < 0) + return -EINVAL; + + HWFNC_DBG_IRQ("client_id:%d ipcc write tx_client:%d rx_client:%d signal_id:%d qtime:%llu\n", + client_id, tx_client, rx_client, signal_id, hw_fence_get_qtime(drv_data)); + hw_fence_ipcc_trigger_signal(drv_data, tx_client, rx_client, signal_id); + + return count; +} + +/** + * hw_fence_dbg_ipcc_write() - debugfs write to trigger an ipcc irq. + * @file: file handler. + * @user_buf: user buffer content from debugfs. + * @count: size of the user buffer. + * @ppos: position offset of the user buffer. 
+ * + * This debugfs receives as parameter a hw-fence driver client_id, and triggers an ipcc signal + * from apps to apps for that client id. + */ +static ssize_t hw_fence_dbg_ipcc_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + return _debugfs_ipcc_trigger(file, user_buf, count, ppos, HW_FENCE_IPC_CLIENT_ID_APPS, + HW_FENCE_IPC_CLIENT_ID_APPS); +} + +#ifdef HW_DPU_IPCC +/** + * hw_fence_dbg_ipcc_dpu_write() - debugfs write to trigger an ipcc irq to dpu core. + * @file: file handler. + * @user_buf: user buffer content from debugfs. + * @count: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs receives as parameter a hw-fence driver client_id, and triggers an ipcc signal + * from apps to dpu for that client id. + */ +static ssize_t hw_fence_dbg_ipcc_dpu_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + return _debugfs_ipcc_trigger(file, user_buf, count, ppos, HW_FENCE_IPC_CLIENT_ID_APPS, + HW_FENCE_IPC_CLIENT_ID_DPU); + +} + +static const struct file_operations hw_fence_dbg_ipcc_dpu_fops = { + .open = simple_open, + .write = hw_fence_dbg_ipcc_dpu_write, +}; +#endif /* HW_DPU_IPCC */ + +static const struct file_operations hw_fence_dbg_ipcc_fops = { + .open = simple_open, + .write = hw_fence_dbg_ipcc_write, +}; + +static inline struct hw_dma_fence *to_hw_dma_fence(struct dma_fence *fence) +{ + return container_of(fence, struct hw_dma_fence, base); +} + +static const char *hw_fence_dbg_get_driver_name(struct dma_fence *fence) +{ + struct hw_dma_fence *hw_dma_fence = to_hw_dma_fence(fence); + + return hw_dma_fence->name; +} + +static const char *hw_fence_dbg_get_timeline_name(struct dma_fence *fence) +{ + struct hw_dma_fence *hw_dma_fence = to_hw_dma_fence(fence); + + return hw_dma_fence->name; +} + +static bool hw_fence_dbg_enable_signaling(struct dma_fence *fence) +{ + return true; +} + +static void _hw_fence_release(struct hw_dma_fence 
*hw_dma_fence) +{ + if (IS_ERR_OR_NULL(hw_dma_fence->client_handle)) { + HWFNC_ERR("invalid hwfence data, won't release hw_fence\n"); + return; + } + + /* release hw-fence */ + if (msm_hw_fence_destroy(hw_dma_fence->client_handle, &hw_dma_fence->base)) + HWFNC_ERR("failed to release hw_fence\n"); +} + +static void hw_fence_dbg_release(struct dma_fence *fence) +{ + struct hw_dma_fence *hw_dma_fence; + + if (!fence) + return; + + HWFNC_DBG_H("release backing fence %pK\n", fence); + hw_dma_fence = to_hw_dma_fence(fence); + + if (test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) + _hw_fence_release(hw_dma_fence); + + kfree(fence->lock); + kfree(hw_dma_fence); +} + +static struct dma_fence_ops hw_fence_dbg_ops = { + .get_driver_name = hw_fence_dbg_get_driver_name, + .get_timeline_name = hw_fence_dbg_get_timeline_name, + .enable_signaling = hw_fence_dbg_enable_signaling, + .wait = dma_fence_default_wait, + .release = hw_fence_dbg_release, +}; + +struct client_data *_get_client_node(struct hw_fence_driver_data *drv_data, u32 client_id) +{ + struct client_data *node = NULL; + bool found = false; + + mutex_lock(&drv_data->debugfs_data.clients_list_lock); + list_for_each_entry(node, &drv_data->debugfs_data.clients_list, list) { + if (node->client_id == client_id) { + found = true; + break; + } + } + mutex_unlock(&drv_data->debugfs_data.clients_list_lock); + + return found ? node : NULL; +} + +/** + * hw_fence_dbg_reset_client_wr() - debugfs write to trigger reset in a debug hw-fence client. + * @file: file handler. + * @user_buf: user buffer content from debugfs. + * @count: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs receives as parameter a hw-fence driver client_id, and triggers a reset for + * this client. Note that this operation will only perform on hw-fence clients created through + * the debug framework. 
+ */ +static ssize_t hw_fence_dbg_reset_client_wr(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + int client_id, ret; + struct client_data *client_info; + struct hw_fence_driver_data *drv_data; + + client_id = _get_debugfs_input_client(file, user_buf, count, ppos, &drv_data); + if (client_id < 0) + return -EINVAL; + + client_info = _get_client_node(drv_data, client_id); + if (!client_info || IS_ERR_OR_NULL(client_info->client_handle)) { + HWFNC_ERR("client:%d not registered as debug client\n", client_id); + return -EINVAL; + } + + HWFNC_DBG_H("resetting client: %d\n", client_id); + ret = msm_hw_fence_reset_client(client_info->client_handle, 0); + if (ret) + HWFNC_ERR("failed to reset client:%d\n", client_id); + + return count; +} + +/** + * hw_fence_dbg_register_clients_wr() - debugfs write to register a client with the hw-fence + * driver for debugging. + * @file: file handler. + * @user_buf: user buffer content from debugfs. + * @count: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs receives as parameter a hw-fence driver client_id to register for debug. + * Note that if the client_id received was already registered by any other driver, the + * registration here will fail. 
+ */ +static ssize_t hw_fence_dbg_register_clients_wr(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + int client_id; + struct client_data *client_info; + struct hw_fence_driver_data *drv_data; + + client_id = _get_debugfs_input_client(file, user_buf, count, ppos, &drv_data); + if (client_id < 0) + return -EINVAL; + + /* we cannot create same debug client twice */ + if (_get_client_node(drv_data, client_id)) { + HWFNC_ERR("client:%d already registered as debug client\n", client_id); + return -EINVAL; + } + + client_info = kzalloc(sizeof(*client_info), GFP_KERNEL); + if (!client_info) + return -ENOMEM; + + HWFNC_DBG_H("register client %d\n", client_id); + client_info->client_handle = msm_hw_fence_register(client_id, + &client_info->mem_descriptor); + if (IS_ERR_OR_NULL(client_info->client_handle)) { + HWFNC_ERR("error registering as debug client:%d\n", client_id); + client_info->client_handle = NULL; + return -EFAULT; + } + + client_info->dma_context = dma_fence_context_alloc(1); + client_info->client_id = client_id; + + mutex_lock(&drv_data->debugfs_data.clients_list_lock); + list_add(&client_info->list, &drv_data->debugfs_data.clients_list); + mutex_unlock(&drv_data->debugfs_data.clients_list_lock); + + return count; +} + +struct hw_fence_out_clients_map { + int ipc_client_id; /* ipc client id for the hw fence client */ + int ipc_signal_id; /* ipc signal id for the hw fence client */ +}; + +/* NOTE: These signals are the ones that the actual clients should be triggering, hw-fence driver + * does not need to have knowledge of these signals. Adding them here for debugging purposes. 
+ * Only fence controller and the cliens know these id's, since these + * are to trigger the ipcc from the 'client hw-core' to the 'hw-fence controller' + * + * Note that the index of this struct must match the enum hw_fence_client_id + */ +struct hw_fence_out_clients_map dbg_out_clients_signal_map_no_dpu[HW_FENCE_CLIENT_MAX] = { + {HW_FENCE_IPC_CLIENT_ID_APPS, 0}, /* CTRL_LOOPBACK */ + {HW_FENCE_IPC_CLIENT_ID_GPU, 0}, /* CTX0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 2}, /* CTL0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 4}, /* CTL1 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 6}, /* CTL2 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 8}, /* CTL3 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 10}, /* CTL4 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 12} /* CTL5 */ +}; + +/** + * hw_fence_dbg_tx_and_signal_clients_wr() - debugfs write to simulate the lifecycle of a hw-fence. + * @file: file handler. + * @user_buf: user buffer content from debugfs. + * @count: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs receives as parameter the number of iterations that the simulation will run, + * each iteration will: create, signal, register-for-signal and destroy a hw-fence. + * Note that this simulation relies in the user first registering the clients as debug-clients + * through the debugfs 'hw_fence_dbg_register_clients_wr'. If the clients are not previously + * registered as debug-clients, this simulation will fail and won't run. 
+ */ +static ssize_t hw_fence_dbg_tx_and_signal_clients_wr(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + u32 input_data, client_id_src, client_id_dst, tx_client, rx_client; + struct client_data *client_info_src, *client_info_dst; + struct hw_fence_driver_data *drv_data; + struct msm_hw_fence_client *hw_fence_client, *hw_fence_client_dst; + u64 context, seqno, hash; + char buf[10]; + int signal_id, ret; + + if (!file || !file->private_data) { + HWFNC_ERR("unexpected data %d\n", file); + return -EINVAL; + } + drv_data = file->private_data; + + if (count >= sizeof(buf)) + return -EFAULT; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + buf[count] = 0; /* end of string */ + + if (kstrtouint(buf, 0, &input_data)) + return -EFAULT; + + if (input_data <= 0) { + HWFNC_ERR("won't do anything, write value greather than 0 to start..\n"); + return 0; + } else if (input_data > HW_FENCE_DEBUG_MAX_LOOPS) { + HWFNC_ERR("requested loops:%d exceed max:%d, setting max\n", input_data, + HW_FENCE_DEBUG_MAX_LOOPS); + input_data = HW_FENCE_DEBUG_MAX_LOOPS; + } + + client_id_src = HW_FENCE_CLIENT_ID_CTL0; + client_id_dst = HW_FENCE_CLIENT_ID_CTL1; + + client_info_src = _get_client_node(drv_data, client_id_src); + client_info_dst = _get_client_node(drv_data, client_id_dst); + + if (!client_info_src || IS_ERR_OR_NULL(client_info_src->client_handle) || + !client_info_dst || IS_ERR_OR_NULL(client_info_dst->client_handle)) { + /* Make sure we registered this client through debugfs */ + HWFNC_ERR("client_id_src:%d or client_id_dst:%d not registered as debug client!\n", + client_id_src, client_id_dst); + return -EINVAL; + } + + hw_fence_client = (struct msm_hw_fence_client *)client_info_src->client_handle; + hw_fence_client_dst = (struct msm_hw_fence_client *)client_info_dst->client_handle; + + while (drv_data->debugfs_data.create_hw_fences && input_data > 0) { + + /***********************************************************/ + /***** SRC 
CLIENT - CREATE HW FENCE & TX QUEUE UPDATE ******/ + /***********************************************************/ + + /* we will use the context and the seqno of the source client */ + context = client_info_src->dma_context; + seqno = client_info_src->seqno_cnt; + + /* linear increment of the seqno for the src client*/ + client_info_src->seqno_cnt++; + + /* Create hw fence for src client */ + ret = hw_fence_create(drv_data, hw_fence_client, context, seqno, &hash); + if (ret) { + HWFNC_ERR("Error creating HW fence\n"); + goto exit; + } + + /* Write to Tx queue */ + hw_fence_update_queue(drv_data, hw_fence_client, context, seqno, hash, + 0, 0, HW_FENCE_TX_QUEUE - 1); // no flags and no error + + /**********************************************/ + /***** DST CLIENT - REGISTER WAIT CLIENT ******/ + /**********************************************/ + /* use same context and seqno that src client used to create fence */ + ret = hw_fence_register_wait_client(drv_data, hw_fence_client_dst, context, seqno); + if (ret) { + HWFNC_ERR("failed to register for wait\n"); + return -EINVAL; + } + + /*********************************************/ + /***** SRC CLIENT - TRIGGER IPCC SIGNAL ******/ + /*********************************************/ + + /* AFTER THIS IS WHEN SVM WILL GET CALLED AND WILL PROCESS SRC AND DST CLIENTS */ + + /* Trigger IPCC for SVM to read the queue */ + + /* Get signal-id that hw-fence driver would trigger for this client */ + signal_id = dbg_out_clients_signal_map_no_dpu[client_id_src].ipc_signal_id; + if (signal_id < 0) + return -EINVAL; + + /* Write to ipcc to trigger the irq */ + tx_client = HW_FENCE_IPC_CLIENT_ID_APPS; + rx_client = HW_FENCE_IPC_CLIENT_ID_APPS; + HWFNC_DBG_IRQ("client:%d tx_client:%d rx_client:%d signal:%d delay:%d in_data%d\n", + client_id_src, tx_client, rx_client, signal_id, + drv_data->debugfs_data.hw_fence_sim_release_delay, input_data); + + hw_fence_ipcc_trigger_signal(drv_data, tx_client, rx_client, signal_id); + + 
/********************************************/ + /******** WAIT ******************************/ + /********************************************/ + + /* wait between iterations */ + usleep_range(drv_data->debugfs_data.hw_fence_sim_release_delay, + (drv_data->debugfs_data.hw_fence_sim_release_delay + 5)); + + /******************************************/ + /***** SRC CLIENT - CLEANUP HW FENCE ******/ + /******************************************/ + + /* cleanup hw fence for src client */ + ret = hw_fence_destroy(drv_data, hw_fence_client, context, seqno); + if (ret) { + HWFNC_ERR("Error destroying HW fence\n"); + goto exit; + } + + input_data--; + } /* LOOP.. */ + +exit: + return count; +} + +/** + * hw_fence_dbg_create_wr() - debugfs write to simulate the creation of a hw-fence. + * @file: file handler. + * @user_buf: user buffer content from debugfs. + * @count: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs receives as parameter the client-id, for which the hw-fence will be created. + * Note that this simulation relies in the user first registering the client as a debug-client + * through the debugfs 'hw_fence_dbg_register_clients_wr'. If the client is not previously + * registered as debug-client, this simulation will fail and won't run. 
+ */ +static ssize_t hw_fence_dbg_create_wr(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct msm_hw_fence_create_params params; + struct hw_fence_driver_data *drv_data; + struct client_data *client_info; + struct hw_dma_fence *dma_fence; + spinlock_t *fence_lock; + static u64 hw_fence_dbg_seqno = 1; + int client_id, ret; + u64 hash; + + client_id = _get_debugfs_input_client(file, user_buf, count, ppos, &drv_data); + if (client_id < 0) + return -EINVAL; + + client_info = _get_client_node(drv_data, client_id); + if (!client_info || IS_ERR_OR_NULL(client_info->client_handle)) { + HWFNC_ERR("client:%d not registered as debug client\n", client_id); + return -EINVAL; + } + + /* create debug dma_fence */ + fence_lock = kzalloc(sizeof(*fence_lock), GFP_KERNEL); + if (!fence_lock) + return -ENOMEM; + + dma_fence = kzalloc(sizeof(*dma_fence), GFP_KERNEL); + if (!dma_fence) { + kfree(fence_lock); + return -ENOMEM; + } + + snprintf(dma_fence->name, HW_FENCE_NAME_SIZE, "hwfence:id:%d:ctx=%lu:seqno:%lu", + client_id, client_info->dma_context, hw_fence_dbg_seqno); + + spin_lock_init(fence_lock); + dma_fence_init(&dma_fence->base, &hw_fence_dbg_ops, fence_lock, + client_info->dma_context, hw_fence_dbg_seqno); + + HWFNC_DBG_H("creating hw_fence for client:%d ctx:%llu seqno:%llu\n", client_id, + client_info->dma_context, hw_fence_dbg_seqno); + params.fence = &dma_fence->base; + params.handle = &hash; + ret = msm_hw_fence_create(client_info->client_handle, ¶ms); + if (ret) { + HWFNC_ERR("failed to create hw_fence for client:%d ctx:%llu seqno:%llu\n", + client_id, client_info->dma_context, hw_fence_dbg_seqno); + dma_fence_put(&dma_fence->base); + return -EINVAL; + } + hw_fence_dbg_seqno++; + + /* keep handle in dma_fence, to destroy hw-fence during release */ + dma_fence->client_handle = client_info->client_handle; + + return count; +} + +#define HFENCE_TBL_MSG \ + "[%d]hfence[%d] v:%d err:%d ctx:%d seqno:%d wait:0x%llx alloc:%d f:0x%lx tt:%llu 
wt:%llu\n" + +static inline int _dump_fence(struct msm_hw_fence *hw_fence, char *buf, int len, int max_size, + u32 index, u32 cnt) +{ + int ret; + + ret = scnprintf(buf + len, max_size - len, HFENCE_TBL_MSG, + cnt, index, hw_fence->valid, hw_fence->error, + hw_fence->ctx_id, hw_fence->seq_id, + hw_fence->wait_client_mask, hw_fence->fence_allocator, + hw_fence->flags, hw_fence->fence_trigger_time, hw_fence->fence_wait_time); + + HWFNC_DBG_L(HFENCE_TBL_MSG, + cnt, index, hw_fence->valid, hw_fence->error, + hw_fence->ctx_id, hw_fence->seq_id, + hw_fence->wait_client_mask, hw_fence->fence_allocator, + hw_fence->flags, hw_fence->fence_trigger_time, hw_fence->fence_wait_time); + + return ret; +} + +static int dump_single_entry(struct hw_fence_driver_data *drv_data, char *buf, u32 *index, + int max_size) +{ + struct msm_hw_fence *hw_fence; + u64 context, seqno, hash = 0; + int len = 0; + + context = drv_data->debugfs_data.context_rd; + seqno = drv_data->debugfs_data.seqno_rd; + + hw_fence = msm_hw_fence_find(drv_data, NULL, context, seqno, &hash); + if (!hw_fence) { + HWFNC_ERR("no valid hfence found for context:%lu seqno:%lu", context, seqno, hash); + len = scnprintf(buf + len, max_size - len, + "no valid hfence found for context:%lu seqno:%lu hash:%lu\n", + context, seqno, hash); + + goto exit; + } + + len = _dump_fence(hw_fence, buf, len, max_size, hash, 0); + +exit: + /* move idx to end of table to stop the dump */ + *index = drv_data->hw_fences_tbl_cnt; + + return len; +} + +static int dump_full_table(struct hw_fence_driver_data *drv_data, char *buf, u32 *index, + u32 *cnt, int max_size, int entry_size) +{ + struct msm_hw_fence *hw_fence; + int len = 0; + + while (((*index)++ < drv_data->hw_fences_tbl_cnt) && (len < (max_size - entry_size))) { + hw_fence = &drv_data->hw_fences_tbl[*index]; + + if (!hw_fence->valid) + continue; + + len += _dump_fence(hw_fence, buf, len, max_size, *index, *cnt); + (*cnt)++; + } + + return len; +} + +/** + * hw_fence_dbg_dump_table_rd() 
- debugfs read to dump the hw-fences table. + * @file: file handler. + * @user_buf: user buffer content for debugfs. + * @user_buf_size: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs dumps the hw-fence table. By default debugfs will dump all the valid entries of the + * whole table. However, if user only wants to dump only one particular entry, user can provide the + * context-id and seqno of the dma-fence of interest by writing to this debugfs node (see + * documentation for the write in 'hw_fence_dbg_dump_table_wr'). + */ +static ssize_t hw_fence_dbg_dump_table_rd(struct file *file, char __user *user_buf, + size_t user_buf_size, loff_t *ppos) +{ + struct hw_fence_driver_data *drv_data; + int entry_size = sizeof(struct msm_hw_fence); + char *buf = NULL; + int len = 0, max_size = SZ_4K; + static u32 index, cnt; + + if (!file || !file->private_data) { + HWFNC_ERR("unexpected data %d\n", file); + return -EINVAL; + } + drv_data = file->private_data; + + if (!drv_data->hw_fences_tbl) { + HWFNC_ERR("Failed to dump table: Null fence table\n"); + return -EINVAL; + } + + if (index >= drv_data->hw_fences_tbl_cnt) { + HWFNC_DBG_H("no more data index:%d cnt:%d\n", index, drv_data->hw_fences_tbl_cnt); + index = cnt = 0; + return 0; + } + + if (user_buf_size < entry_size) { + HWFNC_ERR("Not enough buff size:%d to dump entries:%d\n", user_buf_size, + entry_size); + return -EINVAL; + } + + buf = kzalloc(max_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + len = drv_data->debugfs_data.entry_rd ? 
+ dump_single_entry(drv_data, buf, &index, max_size) : + dump_full_table(drv_data, buf, &index, &cnt, max_size, entry_size); + + if (len <= 0 || len > user_buf_size) { + HWFNC_ERR("len:%d invalid buff size:%d\n", len, user_buf_size); + len = 0; + goto exit; + } + + if (copy_to_user(user_buf, buf, len)) { + HWFNC_ERR("failed to copy to user!\n"); + len = -EFAULT; + goto exit; + } + *ppos += len; +exit: + kfree(buf); + return len; +} + +/** + * hw_fence_dbg_dump_table_wr() - debugfs write to control the dump of the hw-fences table. + * @file: file handler. + * @user_buf: user buffer content from debugfs. + * @user_buf_size: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs receives as parameters the settings to dump either the whole hw-fences table + * or only one element on the table in the next read of the same debugfs node. + * If this debugfs receives two input values, it will interpret them as the 'context-id' and the + * 'sequence-id' to dump from the hw-fence table in the subsequent reads of the debugfs. + * Otherwise, if the debugfs receives only one input value, the next read from the debugfs, will + * dump the whole hw-fences table. 
+ */ +static ssize_t hw_fence_dbg_dump_table_wr(struct file *file, + const char __user *user_buf, size_t user_buf_size, loff_t *ppos) +{ + struct hw_fence_driver_data *drv_data; + u64 param_0, param_1; + char buf[24]; + int num_input_params; + + if (!file || !file->private_data) { + HWFNC_ERR("unexpected data %d\n", file); + return -EINVAL; + } + drv_data = file->private_data; + + if (user_buf_size >= sizeof(buf)) { + HWFNC_ERR("wrong size:%d size:%d\n", user_buf_size, sizeof(buf)); + return -EFAULT; + } + + if (copy_from_user(buf, user_buf, user_buf_size)) + return -EFAULT; + + buf[user_buf_size] = 0; /* end of string */ + + /* read the input params */ + num_input_params = sscanf(buf, "%lu %lu", ¶m_0, ¶m_1); + + if (num_input_params == 2) { /* if debugfs receives two input params */ + drv_data->debugfs_data.context_rd = param_0; + drv_data->debugfs_data.seqno_rd = param_1; + drv_data->debugfs_data.entry_rd = true; + } else if (num_input_params == 1) { /* if debugfs receives one param */ + drv_data->debugfs_data.context_rd = 0; + drv_data->debugfs_data.seqno_rd = 0; + drv_data->debugfs_data.entry_rd = false; + } else { + HWFNC_ERR("invalid num params:%d\n", num_input_params); + return -EFAULT; + } + + return user_buf_size; +} + +static void _cleanup_fences(int i, struct dma_fence **fences, spinlock_t **fences_lock) +{ + struct hw_dma_fence *dma_fence; + int idx; + + for (idx = i; idx >= 0 ; idx--) { + kfree(fences_lock[idx]); + + dma_fence = to_hw_dma_fence(fences[idx]); + kfree(dma_fence); + } + + kfree(fences_lock); + kfree(fences); +} + +/** + * hw_fence_dbg_create_join_fence() - debugfs write to simulate the lifecycle of a join hw-fence. + * @file: file handler. + * @user_buf: user buffer content from debugfs. + * @count: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs will: create, signal, register-for-signal and destroy a join hw-fence. 
+ * Note that this simulation relies in the user first registering the clients as debug-clients + * through the debugfs 'hw_fence_dbg_register_clients_wr'. If the clients are not previously + * registered as debug-clients, this simulation will fail and won't run. + */ +static ssize_t hw_fence_dbg_create_join_fence(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct dma_fence_array *fence_array; + struct hw_fence_driver_data *drv_data; + struct dma_fence *fence_array_fence; + struct client_data *client_info_src, *client_info_dst; + u64 hw_fence_dbg_seqno = 1; + int client_id_src, client_id_dst; + struct msm_hw_fence_create_params params; + int i, ret = 0; + u64 hash; + struct msm_hw_fence_client *hw_fence_client; + int tx_client, rx_client, signal_id; + + /* creates 3 fences and a parent fence */ + int num_fences = 3; + struct dma_fence **fences = NULL; + spinlock_t **fences_lock = NULL; + + if (!file || !file->private_data) { + HWFNC_ERR("unexpected data %d\n", file); + return -EINVAL; + } + drv_data = file->private_data; + client_id_src = HW_FENCE_CLIENT_ID_CTL0; + client_id_dst = HW_FENCE_CLIENT_ID_CTL1; + client_info_src = _get_client_node(drv_data, client_id_src); + client_info_dst = _get_client_node(drv_data, client_id_dst); + if (!client_info_src || IS_ERR_OR_NULL(client_info_src->client_handle) || + !client_info_dst || IS_ERR_OR_NULL(client_info_dst->client_handle)) { + HWFNC_ERR("client_src:%d or client:%d is not register as debug client\n", + client_id_src, client_id_dst); + return -EINVAL; + } + hw_fence_client = (struct msm_hw_fence_client *)client_info_src->client_handle; + + fences_lock = kcalloc(num_fences, sizeof(*fences_lock), GFP_KERNEL); + if (!fences_lock) + return -ENOMEM; + + fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL); + if (!fences) { + kfree(fences_lock); + return -ENOMEM; + } + + /* Create the array of dma fences */ + for (i = 0; i < num_fences; i++) { + struct hw_dma_fence *dma_fence; + + 
fences_lock[i] = kzalloc(sizeof(*fences_lock), GFP_KERNEL); + if (!fences_lock[i]) { + _cleanup_fences(i, fences, fences_lock); + return -ENOMEM; + } + + dma_fence = kzalloc(sizeof(*dma_fence), GFP_KERNEL); + if (!dma_fence) { + _cleanup_fences(i, fences, fences_lock); + return -ENOMEM; + } + fences[i] = &dma_fence->base; + + spin_lock_init(fences_lock[i]); + dma_fence_init(fences[i], &hw_fence_dbg_ops, fences_lock[i], + client_info_src->dma_context, hw_fence_dbg_seqno + i); + } + + /* create the fence array from array of dma fences */ + fence_array = dma_fence_array_create(num_fences, fences, + client_info_src->dma_context, hw_fence_dbg_seqno + num_fences, 0); + if (!fence_array) { + HWFNC_ERR("Error creating fence_array\n"); + _cleanup_fences(num_fences - 1, fences, fences_lock); + return -EINVAL; + } + + /* create hw fence and write to tx queue for each dma fence */ + for (i = 0; i < num_fences; i++) { + params.fence = fences[i]; + params.handle = &hash; + + ret = msm_hw_fence_create(client_info_src->client_handle, ¶ms); + if (ret) { + HWFNC_ERR("Error creating HW fence\n"); + count = -EINVAL; + goto error; + } + + /* Write to Tx queue */ + hw_fence_update_queue(drv_data, hw_fence_client, client_info_src->dma_context, + hw_fence_dbg_seqno + i, hash, 0, 0, + HW_FENCE_TX_QUEUE - 1); + } + + /* wait on the fence array */ + fence_array_fence = &fence_array->base; + msm_hw_fence_wait_update(client_info_dst->client_handle, &fence_array_fence, 1, 1); + + signal_id = dbg_out_clients_signal_map_no_dpu[client_id_src].ipc_signal_id; + if (signal_id < 0) { + count = -EINVAL; + goto error; + } + + /* write to ipcc to trigger the irq */ + tx_client = HW_FENCE_IPC_CLIENT_ID_APPS; + rx_client = HW_FENCE_IPC_CLIENT_ID_APPS; + hw_fence_ipcc_trigger_signal(drv_data, tx_client, rx_client, signal_id); + + usleep_range(drv_data->debugfs_data.hw_fence_sim_release_delay, + (drv_data->debugfs_data.hw_fence_sim_release_delay + 5)); + +error: + /* this frees the memory for the fence-array 
and each dma-fence */ + dma_fence_put(&fence_array->base); + + /* + * free array of pointers, no need to call kfree in 'fences', since that is released + * from the fence-array release api + */ + kfree(fences_lock); + + return count; +} + +static const struct file_operations hw_fence_reset_client_fops = { + .open = simple_open, + .write = hw_fence_dbg_reset_client_wr, +}; + +static const struct file_operations hw_fence_register_clients_fops = { + .open = simple_open, + .write = hw_fence_dbg_register_clients_wr, +}; + +static const struct file_operations hw_fence_tx_and_signal_clients_fops = { + .open = simple_open, + .write = hw_fence_dbg_tx_and_signal_clients_wr, +}; + +static const struct file_operations hw_fence_create_fops = { + .open = simple_open, + .write = hw_fence_dbg_create_wr, +}; + +static const struct file_operations hw_fence_dump_table_fops = { + .open = simple_open, + .write = hw_fence_dbg_dump_table_wr, + .read = hw_fence_dbg_dump_table_rd, +}; + +static const struct file_operations hw_fence_create_join_fence_fops = { + .open = simple_open, + .write = hw_fence_dbg_create_join_fence, +}; + +int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data) +{ + struct dentry *debugfs_root; + + debugfs_root = debugfs_create_dir("hw_fence", NULL); + if (IS_ERR_OR_NULL(debugfs_root)) { + HWFNC_ERR("debugfs_root create_dir fail, error %ld\n", + PTR_ERR(debugfs_root)); + drv_data->debugfs_data.root = NULL; + return -EINVAL; + } + + mutex_init(&drv_data->debugfs_data.clients_list_lock); + INIT_LIST_HEAD(&drv_data->debugfs_data.clients_list); + drv_data->debugfs_data.root = debugfs_root; + drv_data->debugfs_data.create_hw_fences = true; + drv_data->debugfs_data.hw_fence_sim_release_delay = 8333; /* uS */ + + debugfs_create_file("ipc_trigger", 0600, debugfs_root, drv_data, + &hw_fence_dbg_ipcc_fops); +#ifdef HW_DPU_IPCC + debugfs_create_file("dpu_trigger", 0600, debugfs_root, drv_data, + &hw_fence_dbg_ipcc_dpu_fops); +#endif /* HW_DPU_IPCC */ + 
	debugfs_create_file("hw_fence_reset_client", 0600, debugfs_root, drv_data,
		&hw_fence_reset_client_fops);
	debugfs_create_file("hw_fence_register_clients", 0600, debugfs_root, drv_data,
		&hw_fence_register_clients_fops);
	debugfs_create_file("hw_fence_tx_and_signal", 0600, debugfs_root, drv_data,
		&hw_fence_tx_and_signal_clients_fops);
	debugfs_create_file("hw_fence_create_join_fence", 0600, debugfs_root, drv_data,
		&hw_fence_create_join_fence_fops);
	debugfs_create_bool("create_hw_fences", 0600, debugfs_root,
		&drv_data->debugfs_data.create_hw_fences);
	debugfs_create_u32("sleep_range_us", 0600, debugfs_root,
		&drv_data->debugfs_data.hw_fence_sim_release_delay);
	debugfs_create_file("hw_fence_create", 0600, debugfs_root, drv_data,
		&hw_fence_create_fops);
	debugfs_create_u32("hw_fence_debug_level", 0600, debugfs_root, &msm_hw_fence_debug_level);
	debugfs_create_file("hw_fence_dump_table", 0600, debugfs_root, drv_data,
		&hw_fence_dump_table_fops);

	return 0;
}

#else
/* Stub: debugfs support compiled out (CONFIG_DEBUG_FS disabled); report success. */
int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data)
{
	return 0;
}
#endif /* CONFIG_DEBUG_FS */
diff --git a/hw_fence/src/hw_fence_drv_ipc.c b/hw_fence/src/hw_fence_drv_ipc.c
new file mode 100644
index 0000000000..7879d4f788
--- /dev/null
+++ b/hw_fence/src/hw_fence_drv_ipc.c
@@ -0,0 +1,247 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "hw_fence_drv_priv.h"
#include "hw_fence_drv_utils.h"
#include "hw_fence_drv_ipc.h"
#include "hw_fence_drv_debug.h"

/**
 * struct hw_fence_client_ipc_map - map client id with ipc signal for trigger.
 * @ipc_client_id: ipc client id for the hw-fence client.
 * @ipc_signal_id: ipc signal id for the hw-fence client.
 * @update_rxq: bool to indicate if client uses rx-queue.
+ */ +struct hw_fence_client_ipc_map { + int ipc_client_id; + int ipc_signal_id; + bool update_rxq; +}; + +/** + * struct hw_fence_clients_ipc_map_no_dpu - Table makes the 'client to signal' mapping, which + * is used by the hw fence driver to trigger ipc signal when the hw fence is already + * signaled. + * This no_dpu version is for targets that do not support dpu client id + * + * Notes: + * The index of this struct must match the enum hw_fence_client_id. + * To change to a loopback signal instead of GMU, change ctx0 row to use: + * {HW_FENCE_IPC_CLIENT_ID_APPS, 20}. + */ +struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_no_dpu[HW_FENCE_CLIENT_MAX] = { + {HW_FENCE_IPC_CLIENT_ID_APPS, 1, true}, /* ctrl queue loopback */ + {HW_FENCE_IPC_CLIENT_ID_GPU, 0, true}, /* ctx0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 14, false}, /* ctl0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 15, false}, /* ctl1 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 16, false}, /* ctl2 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 17, false}, /* ctl3 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 18, false}, /* ctl4 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 19, false}, /* ctl5 */ +}; + +/** + * struct hw_fence_clients_ipc_map - Table makes the 'client to signal' mapping, which is + * used by the hw fence driver to trigger ipc signal when hw fence is already + * signaled. + * This version is for targets that support dpu client id. 
+ * + * Note that the index of this struct must match the enum hw_fence_client_id + */ +struct hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_CLIENT_MAX] = { + {HW_FENCE_IPC_CLIENT_ID_APPS, 1, true}, /* ctrl queue loopback */ + {HW_FENCE_IPC_CLIENT_ID_GPU, 0, true}, /* ctx0 */ + {HW_FENCE_IPC_CLIENT_ID_DPU, 0, false}, /* ctl0 */ + {HW_FENCE_IPC_CLIENT_ID_DPU, 1, false}, /* ctl1 */ + {HW_FENCE_IPC_CLIENT_ID_DPU, 2, false}, /* ctl2 */ + {HW_FENCE_IPC_CLIENT_ID_DPU, 3, false}, /* ctl3 */ + {HW_FENCE_IPC_CLIENT_ID_DPU, 4, false}, /* ctl4 */ + {HW_FENCE_IPC_CLIENT_ID_DPU, 5, false}, /* ctl5 */ +}; + +int hw_fence_ipcc_get_client_id(struct hw_fence_driver_data *drv_data, u32 client_id) +{ + if (!drv_data || client_id >= HW_FENCE_CLIENT_MAX) + return -EINVAL; + + return drv_data->ipc_clients_table[client_id].ipc_client_id; +} + +int hw_fence_ipcc_get_signal_id(struct hw_fence_driver_data *drv_data, u32 client_id) +{ + if (!drv_data || client_id >= HW_FENCE_CLIENT_MAX) + return -EINVAL; + + return drv_data->ipc_clients_table[client_id].ipc_signal_id; +} + +bool hw_fence_ipcc_needs_rxq_update(struct hw_fence_driver_data *drv_data, int client_id) +{ + if (!drv_data || client_id >= HW_FENCE_CLIENT_MAX) + return -EINVAL; + + return drv_data->ipc_clients_table[client_id].update_rxq; +} + +/** + * _get_ipc_client_name() - Returns ipc client name, used for debugging. 
 */
static inline char *_get_ipc_client_name(u32 client_id)
{
	switch (client_id) {
	case HW_FENCE_IPC_CLIENT_ID_APPS:
		return "APPS";
	case HW_FENCE_IPC_CLIENT_ID_GPU:
		return "GPU";
	case HW_FENCE_IPC_CLIENT_ID_DPU:
		return "DPU";
	}

	return "UNKNOWN";
}

/*
 * Raise an IPCC signal: writes the (rx_client, signal) pair into the tx client's
 * SEND register and fences the write with wmb() so the device observes it.
 */
void hw_fence_ipcc_trigger_signal(struct hw_fence_driver_data *drv_data,
	u32 tx_client_id, u32 rx_client_id, u32 signal_id)
{
	void __iomem *ptr;
	u32 val;

	/* Send signal */
	ptr = IPC_PROTOCOLp_CLIENTc_SEND(drv_data->ipcc_io_mem, drv_data->protocol_id,
		tx_client_id);
	/* register layout: rx client id in the upper halfword, signal id in the lower */
	val = (rx_client_id << 16) | signal_id;

	HWFNC_DBG_IRQ("Sending ipcc from %s (%d) to %s (%d) signal_id:%d [wr:0x%x to off:0x%pK]\n",
		_get_ipc_client_name(tx_client_id), tx_client_id,
		_get_ipc_client_name(rx_client_id), rx_client_id,
		signal_id, val, ptr);
	HWFNC_DBG_H("Write:0x%x to RegOffset:0x%pK\n", val, ptr);
	writel_relaxed(val, ptr);

	/* Make sure value is written */
	wmb();
}

/**
 * _hw_fence_ipcc_hwrev_init() - Initializes internal driver struct with corresponding ipcc data,
 *		according to the ipcc hw revision.
 * @drv_data: driver data.
 * @hwrev: ipcc hw revision.
 */
static int _hw_fence_ipcc_hwrev_init(struct hw_fence_driver_data *drv_data, u32 hwrev)
{
	switch (hwrev) {
	case HW_FENCE_IPCC_HW_REV_100:
		drv_data->ipcc_client_id = HW_FENCE_IPC_CLIENT_ID_APPS;
		drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_LAHAINA;
		drv_data->ipc_clients_table = hw_fence_clients_ipc_map_no_dpu;
		HWFNC_DBG_INIT("ipcc protocol_id: Lahaina\n");
		break;
	case HW_FENCE_IPCC_HW_REV_110:
		drv_data->ipcc_client_id = HW_FENCE_IPC_CLIENT_ID_APPS;
		drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_WAIPIO;
		drv_data->ipc_clients_table = hw_fence_clients_ipc_map_no_dpu;
		HWFNC_DBG_INIT("ipcc protocol_id: Waipio\n");
		break;
	case HW_FENCE_IPCC_HW_REV_170:
		/* Kailua is the only listed target with dpu client-id support */
		drv_data->ipcc_client_id = HW_FENCE_IPC_CLIENT_ID_APPS;
		drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_KAILUA;
		drv_data->ipc_clients_table = hw_fence_clients_ipc_map;
		HWFNC_DBG_INIT("ipcc protocol_id: Kailua\n");
		break;
	default:
		/* unknown ipcc revision: caller treats any non-zero as unsupported */
		return -1;
	}

	return 0;
}

int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data)
{
	void __iomem *ptr;
	u32 val;

	HWFNC_DBG_H("enable ipc +\n");

	/* Read IPC Version from Client=0x8 (apps) for protocol=2 (compute_l1) */
	/* NOTE(review): the Lahaina protocol id is used to locate the VERSION register
	 * before the revision is known — presumably that offset is common across
	 * revisions; confirm
	 */
	val = readl_relaxed(IPC_PROTOCOLp_CLIENTc_VERSION(drv_data->ipcc_io_mem,
		HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_LAHAINA, HW_FENCE_IPC_CLIENT_ID_APPS));
	HWFNC_DBG_INIT("ipcc version:0x%x\n", val);

	if (_hw_fence_ipcc_hwrev_init(drv_data, val)) {
		HWFNC_ERR("ipcc protocol id not supported\n");
		return -EINVAL;
	}

	/* Enable compute l1 (protocol_id = 2) */
	val = 0x00000000;
	ptr = IPC_PROTOCOLp_CLIENTc_CONFIG(drv_data->ipcc_io_mem, drv_data->protocol_id,
		HW_FENCE_IPC_CLIENT_ID_APPS);
	HWFNC_DBG_H("Write:0x%x to RegOffset:0x%pK\n", val, ptr);
	writel_relaxed(val, ptr);

	/* Enable Client-Signal pairs from APPS(NS) (0x8) to APPS(NS) (0x8) */
	/* NOTE(review): 0x000080000 has nine hex digits — confirm whether 0x00080000
	 * or 0x80000 was intended
	 */
	val = 0x000080000;
	ptr = IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_ENABLE(drv_data->ipcc_io_mem,
drv_data->protocol_id,
		HW_FENCE_IPC_CLIENT_ID_APPS);
	HWFNC_DBG_H("Write:0x%x to RegOffset:0x%pK\n", val, ptr);
	writel_relaxed(val, ptr);

	HWFNC_DBG_H("enable ipc -\n");

	return 0;
}

#ifdef HW_DPU_IPCC
int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data)
{
	struct hw_fence_client_ipc_map *hw_fence_client;
	void __iomem *ptr;
	u32 val;
	int i;

	HWFNC_DBG_H("enable dpu ipc +\n");

	if (!drv_data || !drv_data->protocol_id || !drv_data->ipc_clients_table) {
		HWFNC_ERR("invalid drv data\n");
		return -1;
	}

	HWFNC_DBG_H("ipcc_io_mem:0x%lx\n", (u64)drv_data->ipcc_io_mem);

	/*
	 * Enable compute l1 (protocol_id = 2) for dpu (25)
	 * Sets bit(1) to clear when RECV_ID is read
	 */
	val = 0x00000001;
	ptr = IPC_PROTOCOLp_CLIENTc_CONFIG(drv_data->ipcc_io_mem, drv_data->protocol_id,
		HW_FENCE_IPC_CLIENT_ID_DPU);
	HWFNC_DBG_H("Write:0x%x to RegOffset:0x%lx\n", val, (u64)ptr);
	writel_relaxed(val, ptr);

	HWFNC_DBG_H("Initialize dpu signals\n");
	/* Enable Client-Signal pairs from DPU (25) to APPS(NS) (8) */
	for (i = 0; i < HW_FENCE_CLIENT_MAX; i++) {
		hw_fence_client = &drv_data->ipc_clients_table[i];

		/* skip any client that is not a dpu client */
		if (hw_fence_client->ipc_client_id != HW_FENCE_IPC_CLIENT_ID_DPU)
			continue;

		/* Enable signals for dpu client */
		HWFNC_DBG_H("dpu:%d client:%d signal:%d\n", hw_fence_client->ipc_client_id, i,
			hw_fence_client->ipc_signal_id);
		/* NOTE(review): 0x000080000 has nine hex digits — confirm whether
		 * 0x00080000 or 0x80000 was intended; also every iteration writes the
		 * same RECV_SIGNAL_ENABLE offset — confirm the register latches each
		 * signal rather than holding only the last value written
		 */
		val = 0x000080000 | (hw_fence_client->ipc_signal_id & 0xFFFF);
		ptr = IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_ENABLE(drv_data->ipcc_io_mem,
			drv_data->protocol_id, HW_FENCE_IPC_CLIENT_ID_DPU);
		HWFNC_DBG_H("Write:0x%x to RegOffset:0x%lx\n", val, (u64)ptr);
		writel_relaxed(val, ptr);
	}

	HWFNC_DBG_H("enable dpu ipc -\n");

	return 0;
}
#endif /* HW_DPU_IPCC */
diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c
new file mode 100644
index 0000000000..edecc41cbc
--- /dev/null
+++ b/hw_fence/src/hw_fence_drv_priv.c
@@ -0,0 +1,1317 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

/* NOTE(review): the three #include targets below were lost in patch extraction
 * (likely angle-bracket <linux/...> headers) — restore them before building
 */
#include
#include
#include

#include "hw_fence_drv_priv.h"
#include "hw_fence_drv_utils.h"
#include "hw_fence_drv_ipc.h"
#include "hw_fence_drv_debug.h"

/* Global atomic lock */
#define GLOBAL_ATOMIC_STORE(lock, val) global_atomic_store(lock, val)

/* NOTE(review): readl_relaxed returns a 32-bit value although qtime is returned
 * as u64 — confirm whether dropping the upper 32 bits is intentional
 */
inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data)
{
	return readl_relaxed(drv_data->qtime_io_mem);
}

/*
 * Carve an HFI region (table header + per-queue headers + queue payload areas)
 * out of reserved memory for either the ctrl queue or one client's queue pair,
 * and record the VA/PA/size of each queue in 'queues'.
 */
static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data,
	enum hw_fence_mem_reserve mem_reserve_id,
	struct msm_hw_fence_mem_addr *mem_descriptor,
	struct msm_hw_fence_queue *queues, int queues_num,
	int client_id)
{
	struct msm_hw_fence_hfi_queue_table_header *hfi_table_header;
	struct msm_hw_fence_hfi_queue_header *hfi_queue_header;
	void *ptr, *qptr;
	phys_addr_t phys, qphys;
	u32 size, start_queue_offset;
	int headers_size, queue_size;
	int i, ret = 0;

	HWFNC_DBG_INIT("mem_reserve_id:%d client_id:%d\n", mem_reserve_id, client_id);
	switch (mem_reserve_id) {
	case HW_FENCE_MEM_RESERVE_CTRL_QUEUE:
		headers_size = HW_FENCE_HFI_CTRL_HEADERS_SIZE;
		queue_size = drv_data->hw_fence_ctrl_queue_size;
		break;
	case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE:
		headers_size = HW_FENCE_HFI_CLIENT_HEADERS_SIZE;
		queue_size = drv_data->hw_fence_client_queue_size;
		break;
	default:
		HWFNC_ERR("Unexpected mem reserve id: %d\n", mem_reserve_id);
		return -EINVAL;
	}

	/* Reserve Virtual and Physical memory for HFI headers */
	ret = hw_fence_utils_reserve_mem(drv_data, mem_reserve_id, &phys, &ptr, &size, client_id);
	if (ret) {
		HWFNC_ERR("Failed to reserve id:%d client %d\n", mem_reserve_id, client_id);
		return -ENOMEM;
	}
	HWFNC_DBG_INIT("phys:0x%x ptr:0x%pK size:%d\n", phys, ptr, size);

	/* Populate Memory descriptor with address */
	mem_descriptor->virtual_addr = ptr;
	mem_descriptor->device_addr = phys;
	mem_descriptor->size = size; /* bytes */
	mem_descriptor->mem_data = NULL; /* Currently we don't need any special info */

	HWFNC_DBG_INIT("Initialize headers\n");
	/* Initialize headers info within hfi memory */
	hfi_table_header = (struct msm_hw_fence_hfi_queue_table_header *)ptr;
	hfi_table_header->version = 0;
	hfi_table_header->size = size; /* bytes */
	/* Offset, from the Base Address, where the first queue header starts */
	hfi_table_header->qhdr0_offset =
		sizeof(struct msm_hw_fence_hfi_queue_table_header);
	hfi_table_header->qhdr_size =
		sizeof(struct msm_hw_fence_hfi_queue_header);
	hfi_table_header->num_q = queues_num; /* number of queues */
	hfi_table_header->num_active_q = queues_num;

	/* Initialize Queues Info within HFI memory */

	/*
	 * Calculate offset where hfi queue header starts, which it is at the
	 * end of the hfi table header
	 */
	HWFNC_DBG_INIT("Initialize queues\n");
	hfi_queue_header = (struct msm_hw_fence_hfi_queue_header *)
		((char *)ptr + HW_FENCE_HFI_TABLE_HEADER_SIZE);
	for (i = 0; i < queues_num; i++) {
		HWFNC_DBG_INIT("init queue[%d]\n", i);

		/* Calculate the offset where the Queue starts */
		start_queue_offset = headers_size + (i * queue_size); /* Bytes */
		qphys = phys + start_queue_offset; /* start of the PA for the queue elems */
		qptr = (char *)ptr + start_queue_offset; /* start of the va for queue elems */

		/* Set the physical start address in the HFI queue header */
		hfi_queue_header->start_addr = qphys;

		/* Set the queue type (i.e. RX or TX queue) */
		hfi_queue_header->type = (i == 0) ?
HW_FENCE_TX_QUEUE : HW_FENCE_RX_QUEUE; + + /* Set the size of this header */ + hfi_queue_header->queue_size = queue_size; + + /* Store Memory info in the Client data */ + queues[i].va_queue = qptr; + queues[i].pa_queue = qphys; + queues[i].va_header = hfi_queue_header; + queues[i].q_size_bytes = queue_size; + HWFNC_DBG_INIT("init:%s client:%d queue[%d]: va=0x%pK pa=0x%x va_hd:0x%pK sz:%d\n", + hfi_queue_header->type == HW_FENCE_TX_QUEUE ? "TX_QUEUE" : "RX_QUEUE", + client_id, i, queues[i].va_queue, queues[i].pa_queue, queues[i].va_header, + queues[i].q_size_bytes); + + /* Next header */ + hfi_queue_header++; + } + + return ret; +} + +static inline _lock_client_queue(int queue_type) +{ + /* Only lock Rx Queue */ + return (queue_type == (HW_FENCE_RX_QUEUE - 1)) ? true : false; +} + +char *_get_queue_type(int queue_type) +{ + return (queue_type == (HW_FENCE_RX_QUEUE - 1)) ? "RXQ" : "TXQ"; +} + +int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, + struct msm_hw_fence_queue_payload *payload, int queue_type) +{ + struct msm_hw_fence_hfi_queue_header *hfi_header; + struct msm_hw_fence_queue *queue; + u32 read_idx; + u32 write_idx; + u32 to_read_idx; + u32 *read_ptr; + u32 payload_size_u32; + u32 q_size_u32; + struct msm_hw_fence_queue_payload *read_ptr_payload; + + if (queue_type >= HW_FENCE_CLIENT_QUEUES || !hw_fence_client || !payload) { + HWFNC_ERR("Invalid queue type:%s hw_fence_client:0x%pK payload:0x%pK\n", queue_type, + hw_fence_client, payload); + return -EINVAL; + } + + queue = &hw_fence_client->queues[queue_type]; + hfi_header = queue->va_header; + + q_size_u32 = (queue->q_size_bytes / sizeof(u32)); + payload_size_u32 = (sizeof(struct msm_hw_fence_queue_payload) / sizeof(u32)); + HWFNC_DBG_Q("sizeof payload:%d\n", sizeof(struct msm_hw_fence_queue_payload)); + + if (!hfi_header || !payload) { + HWFNC_ERR("Invalid queue\n"); + return -EINVAL; + } + + /* Get read and write index */ + read_idx = readl_relaxed(&hfi_header->read_index); + write_idx = 
readl_relaxed(&hfi_header->write_index); + + /* Make sure we read the values */ + rmb(); + + HWFNC_DBG_Q("read client:%d rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d queue:0x%pK\n", + hw_fence_client->client_id, &hfi_header->read_index, &hfi_header->write_index, + read_idx, write_idx, queue); + + if (read_idx == write_idx) { + HWFNC_DBG_Q("Nothing to read!\n"); + return 0; + } + + /* Move the pointer where we need to read and cast it */ + read_ptr = ((u32 *)queue->va_queue + read_idx); + read_ptr_payload = (struct msm_hw_fence_queue_payload *)read_ptr; + HWFNC_DBG_Q("read_ptr:0x%pK queue: va=0x%pK pa=0x%pK read_ptr_payload:0x%pK\n", read_ptr, + queue->va_queue, queue->pa_queue, read_ptr_payload); + + /* Calculate the index after the read */ + to_read_idx = read_idx + payload_size_u32; + + /* + * wrap-around case, here we are reading the last element of the queue, therefore set + * to_read_idx, which is the index after the read, to the beginning of the + * queue + */ + if (to_read_idx >= q_size_u32) + to_read_idx = 0; + + /* Read the Client Queue */ + payload->ctxt_id = readq_relaxed(&read_ptr_payload->ctxt_id); + payload->seqno = readq_relaxed(&read_ptr_payload->seqno); + payload->hash = readq_relaxed(&read_ptr_payload->hash); + payload->flags = readq_relaxed(&read_ptr_payload->flags); + payload->error = readl_relaxed(&read_ptr_payload->error); + + /* update the read index */ + writel_relaxed(to_read_idx, &hfi_header->read_index); + + /* update memory for the index */ + wmb(); + + /* Return one if queue still has contents after read */ + return to_read_idx == write_idx ? 0 : 1; +} + +/* + * This function writes to the queue of the client. 
The 'queue_type' determines + * if this function is writing to the rx or tx queue + */ +int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, u64 seqno, u64 hash, + u64 flags, u32 error, int queue_type) +{ + struct msm_hw_fence_hfi_queue_header *hfi_header; + struct msm_hw_fence_queue *queue; + u32 read_idx; + u32 write_idx; + u32 to_write_idx; + u32 q_size_u32; + u32 q_free_u32; + u32 *q_payload_write_ptr; + u32 payload_size_u32; + struct msm_hw_fence_queue_payload *write_ptr_payload; + bool lock_client = false; + u32 lock_idx; + int ret = 0; + + if (queue_type >= HW_FENCE_CLIENT_QUEUES) { + HWFNC_ERR("Invalid queue type:%s\n", queue_type); + return -EINVAL; + } + + queue = &hw_fence_client->queues[queue_type]; + hfi_header = queue->va_header; + + q_size_u32 = (queue->q_size_bytes / sizeof(u32)); + payload_size_u32 = (sizeof(struct msm_hw_fence_queue_payload) / sizeof(u32)); + + if (!hfi_header) { + HWFNC_ERR("Invalid queue\n"); + return -EINVAL; + } + + /* + * We need to lock the client if there is an Rx Queue update, since that + * is the only time when HW Fence driver can have a race condition updating + * the Rx Queue, which also could be getting updated by the Fence CTL + */ + lock_client = _lock_client_queue(queue_type); + if (lock_client) { + lock_idx = hw_fence_client->client_id - 1; + + if (lock_idx >= drv_data->client_lock_tbl_cnt) { + HWFNC_ERR("lock for client id:%d exceed max:%d\n", + hw_fence_client->client_id, drv_data->client_lock_tbl_cnt); + return -EINVAL; + } + HWFNC_DBG_Q("Locking client id:%d: idx:%d\n", hw_fence_client->client_id, lock_idx); + + /* lock the client rx queue to update */ + GLOBAL_ATOMIC_STORE(&drv_data->client_lock_tbl[lock_idx], 1); /* lock */ + } + + /* Get read and write index */ + read_idx = readl_relaxed(&hfi_header->read_index); + write_idx = readl_relaxed(&hfi_header->write_index); + + /* Make sure we read the values */ + rmb(); + + HWFNC_DBG_Q("wr 
client:%d rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d q:0x%pK type:%d\n", + hw_fence_client->client_id, &hfi_header->read_index, &hfi_header->write_index, + read_idx, write_idx, queue, queue_type); + + /* Check queue to make sure message will fit */ + q_free_u32 = read_idx <= write_idx ? (q_size_u32 - (write_idx - read_idx)) : + (read_idx - write_idx); + if (q_free_u32 <= payload_size_u32) { + HWFNC_ERR("cannot fit the message size:%d\n", payload_size_u32); + ret = -EINVAL; + goto exit; + } + HWFNC_DBG_Q("q_free_u32:%d payload_size_u32:%d\n", q_free_u32, payload_size_u32); + + /* Move the pointer where we need to write and cast it */ + q_payload_write_ptr = ((u32 *)queue->va_queue + write_idx); + write_ptr_payload = (struct msm_hw_fence_queue_payload *)q_payload_write_ptr; + HWFNC_DBG_Q("q_payload_write_ptr:0x%pK queue: va=0x%pK pa=0x%pK write_ptr_payload:0x%pK\n", + q_payload_write_ptr, queue->va_queue, queue->pa_queue, write_ptr_payload); + + /* calculate the index after the write */ + to_write_idx = write_idx + payload_size_u32; + + HWFNC_DBG_Q("to_write_idx:%d write_idx:%d payload_size\n", to_write_idx, write_idx, + payload_size_u32); + HWFNC_DBG_L("client_id:%d update %s hash:%llu ctx_id:%llu seqno:%llu flags:%llu error:%u\n", + hw_fence_client->client_id, _get_queue_type(queue_type), + hash, ctxt_id, seqno, flags, error); + + /* + * wrap-around case, here we are writing to the last element of the queue, therefore + * set to_write_idx, which is the index after the write, to the beginning of the + * queue + */ + if (to_write_idx >= q_size_u32) + to_write_idx = 0; + + /* Update Client Queue */ + writeq_relaxed(ctxt_id, &write_ptr_payload->ctxt_id); + writeq_relaxed(seqno, &write_ptr_payload->seqno); + writeq_relaxed(hash, &write_ptr_payload->hash); + writeq_relaxed(flags, &write_ptr_payload->flags); + writel_relaxed(error, &write_ptr_payload->error); + + /* update memory for the message */ + wmb(); + + /* update the write index */ + 
writel_relaxed(to_write_idx, &hfi_header->write_index); + + /* update memory for the index */ + wmb(); + +exit: + if (lock_client) + GLOBAL_ATOMIC_STORE(&drv_data->client_lock_tbl[lock_idx], 0); /* unlock */ + + return ret; +} + +static int init_global_locks(struct hw_fence_driver_data *drv_data) +{ + struct msm_hw_fence_mem_addr *mem_descriptor; + phys_addr_t phys; + void *ptr; + u32 size; + int ret; + + ret = hw_fence_utils_reserve_mem(drv_data, HW_FENCE_MEM_RESERVE_LOCKS_REGION, &phys, &ptr, + &size, 0); + if (ret) { + HWFNC_ERR("Failed to reserve clients locks mem %d\n", ret); + return -ENOMEM; + } + HWFNC_DBG_INIT("phys:0x%x ptr:0x%pK size:%d\n", phys, ptr, size); + + /* Populate Memory descriptor with address */ + mem_descriptor = &drv_data->clients_locks_mem_desc; + mem_descriptor->virtual_addr = ptr; + mem_descriptor->device_addr = phys; + mem_descriptor->size = size; + mem_descriptor->mem_data = NULL; /* not storing special info for now */ + + /* Initialize internal pointers for managing the tables */ + drv_data->client_lock_tbl = (u64 *)drv_data->clients_locks_mem_desc.virtual_addr; + drv_data->client_lock_tbl_cnt = drv_data->clients_locks_mem_desc.size / sizeof(u64); + + return 0; +} + +static int init_hw_fences_table(struct hw_fence_driver_data *drv_data) +{ + struct msm_hw_fence_mem_addr *mem_descriptor; + phys_addr_t phys; + void *ptr; + u32 size; + int ret; + + ret = hw_fence_utils_reserve_mem(drv_data, HW_FENCE_MEM_RESERVE_TABLE, &phys, &ptr, + &size, 0); + if (ret) { + HWFNC_ERR("Failed to reserve table mem %d\n", ret); + return -ENOMEM; + } + HWFNC_DBG_INIT("phys:0x%x ptr:0x%pK size:%d\n", phys, ptr, size); + + /* Populate Memory descriptor with address */ + mem_descriptor = &drv_data->hw_fences_mem_desc; + mem_descriptor->virtual_addr = ptr; + mem_descriptor->device_addr = phys; + mem_descriptor->size = size; + mem_descriptor->mem_data = NULL; /* not storing special info for now */ + + /* Initialize internal pointers for managing the tables */ + 
drv_data->hw_fences_tbl = (struct msm_hw_fence *)drv_data->hw_fences_mem_desc.virtual_addr; + drv_data->hw_fences_tbl_cnt = drv_data->hw_fences_mem_desc.size / + sizeof(struct msm_hw_fence); + + HWFNC_DBG_INIT("hw_fences_table:0x%pK cnt:%u\n", drv_data->hw_fences_tbl, + drv_data->hw_fences_tbl_cnt); + + return 0; +} + +static int init_ctrl_queue(struct hw_fence_driver_data *drv_data) +{ + struct msm_hw_fence_mem_addr *mem_descriptor; + int ret; + + mem_descriptor = &drv_data->ctrl_queue_mem_desc; + + /* Init ctrl queue */ + ret = init_hw_fences_queues(drv_data, HW_FENCE_MEM_RESERVE_CTRL_QUEUE, + mem_descriptor, drv_data->ctrl_queues, + HW_FENCE_CTRL_QUEUES, 0); + if (ret) + HWFNC_ERR("Failure to init ctrl queue\n"); + + return ret; +} + +int hw_fence_init(struct hw_fence_driver_data *drv_data) +{ + int ret; + __le32 *mem; + + ret = hw_fence_utils_parse_dt_props(drv_data); + if (ret) { + HWFNC_ERR("failed to set dt properties\n"); + goto exit; + } + + /* Allocate hw fence driver mem pool and share it with HYP */ + ret = hw_fence_utils_alloc_mem(drv_data); + if (ret) { + HWFNC_ERR("failed to alloc base memory\n"); + goto exit; + } + + /* Initialize ctrl queue */ + ret = init_ctrl_queue(drv_data); + if (ret) + goto exit; + + ret = init_global_locks(drv_data); + if (ret) + goto exit; + HWFNC_DBG_INIT("Locks allocated at 0x%pK total locks:%d\n", drv_data->client_lock_tbl, + drv_data->client_lock_tbl_cnt); + + /* Initialize hw fences table */ + ret = init_hw_fences_table(drv_data); + if (ret) + goto exit; + + /* Map ipcc registers */ + ret = hw_fence_utils_map_ipcc(drv_data); + if (ret) { + HWFNC_ERR("ipcc regs mapping failed\n"); + goto exit; + } + + /* Map time register */ + ret = hw_fence_utils_map_qtime(drv_data); + if (ret) { + HWFNC_ERR("qtime reg mapping failed\n"); + goto exit; + } + + /* Map ctl_start registers */ + ret = hw_fence_utils_map_ctl_start(drv_data); + if (ret) { + /* This is not fatal error, since platfoms with dpu-ipc + * won't use this option + */ 
+ HWFNC_WARN("no ctl_start regs, won't trigger the frame\n"); + } + + /* Init debugfs */ + ret = hw_fence_debug_debugfs_register(drv_data); + if (ret) { + HWFNC_ERR("debugfs init failed\n"); + goto exit; + } + + /* Init vIRQ from VM */ + ret = hw_fence_utils_init_virq(drv_data); + if (ret) { + HWFNC_ERR("failed to init virq\n"); + goto exit; + } + + mem = drv_data->io_mem_base; + HWFNC_DBG_H("memory ptr:0x%pK val:0x%x\n", mem, *mem); + + HWFNC_DBG_INIT("HW Fences Table Initialized: 0x%pK cnt:%d\n", + drv_data->hw_fences_tbl, drv_data->hw_fences_tbl_cnt); + +exit: + return ret; +} + +int hw_fence_alloc_client_resources(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, + struct msm_hw_fence_mem_addr *mem_descriptor) +{ + int ret; + + /* Init client queues */ + ret = init_hw_fences_queues(drv_data, HW_FENCE_MEM_RESERVE_CLIENT_QUEUE, + &hw_fence_client->mem_descriptor, hw_fence_client->queues, + HW_FENCE_CLIENT_QUEUES, hw_fence_client->client_id); + if (ret) { + HWFNC_ERR("Failure to init the queue for client:%d\n", + hw_fence_client->client_id); + goto exit; + } + + /* Init client memory descriptor */ + memcpy(mem_descriptor, &hw_fence_client->mem_descriptor, + sizeof(struct msm_hw_fence_mem_addr)); + +exit: + return ret; +} + +int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client) +{ + int ret = 0; + + /* + * Initialize IPCC Signals for this client + * + * NOTE: Fore each Client HW-Core, the client drivers might be the ones making + * it's own initialization (in case that any hw-sequence must be enforced), + * however, if that is not the case, any per-client ipcc init to enable the + * signaling, can go here. 
+ */ + switch (hw_fence_client->client_id) { + case HW_FENCE_CLIENT_ID_CTX0: + /* nothing to initialize for gpu client */ + break; + case HW_FENCE_CLIENT_ID_CTL0: + case HW_FENCE_CLIENT_ID_CTL1: + case HW_FENCE_CLIENT_ID_CTL2: + case HW_FENCE_CLIENT_ID_CTL3: + case HW_FENCE_CLIENT_ID_CTL4: + case HW_FENCE_CLIENT_ID_CTL5: +#ifdef HW_DPU_IPCC + /* initialize ipcc signals for dpu clients */ + HWFNC_DBG_H("init_controller_signal: DPU client:%d initialized:%d\n", + hw_fence_client->client_id, drv_data->ipcc_dpu_initialized); + if (!drv_data->ipcc_dpu_initialized) { + drv_data->ipcc_dpu_initialized = true; + + /* Init dpu client ipcc signal */ + hw_fence_ipcc_enable_dpu_signaling(drv_data); + } +#endif /* HW_DPU_IPCC */ + break; + default: + HWFNC_ERR("Unexpected client:%d\n", hw_fence_client->client_id); + ret = -EINVAL; + break; + } + + return ret; +} + +int hw_fence_init_controller_resources(struct msm_hw_fence_client *hw_fence_client) +{ + + /* + * Initialize Fence Controller resources for this Client, + * here we need to use the CTRL queue to communicate to the Fence + * Controller the shared memory for the Rx/Tx queue for this client + * as well as any information that Fence Controller might need to + * know for this client. + * + * NOTE: For now, we are doing a static allocation of the + * client's queues, so currently we don't need any notification + * to the Fence CTL here through the CTRL queue. + * Later-on we might need it, once the PVM to SVM (and vice versa) + * communication for initialization is supported. + */ + + return 0; +} + +void hw_fence_cleanup_client(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client) +{ + /* + * Deallocate any resource allocated for this client. 
+ * If fence controller was notified about existence of this client, + * we will need to notify fence controller that this client is gone + * + * NOTE: Since currently we are doing a 'fixed' memory for the clients queues, + * we don't need any notification to the Fence Controller, yet.. + * however, if the memory allocation is removed from 'fixed' to a dynamic + * allocation, then we will need to notify FenceCTL about the client that is + * going-away here. + */ + mutex_lock(&drv_data->clients_mask_lock); + drv_data->client_id_mask &= ~BIT(hw_fence_client->client_id); + drv_data->clients[hw_fence_client->client_id] = NULL; + mutex_unlock(&drv_data->clients_mask_lock); + + /* Deallocate client's object */ + HWFNC_DBG_LUT("freeing client_id:%d\n", hw_fence_client->client_id); + kfree(hw_fence_client); +} + +static inline int _calculate_hash(u32 table_total_entries, u64 context, u64 seqno, + u64 step, u64 *hash) +{ + u64 m_size = table_total_entries; + int val = 0; + + if (step == 0) { + u64 a_multiplier = HW_FENCE_HASH_A_MULT; + u64 c_multiplier = HW_FENCE_HASH_C_MULT; + u64 b_multiplier = context + (context - 1); /* odd multiplier */ + + /* + * if m, is power of 2, we can optimize with right shift, + * for now we don't do it, to avoid assuming a power of two + */ + *hash = (a_multiplier * seqno * b_multiplier + (c_multiplier * context)) % m_size; + } else { + if (step >= m_size) { + /* + * If we already traversed the whole table, return failure since this means + * there are not available spots, table is either full or full-enough + * that we couldn't find an available spot after traverse the whole table. + * Ideally table shouldn't be so full that we cannot find a value after some + * iterations, so this maximum step size could be optimized to fail earlier. 
+ */ + HWFNC_ERR("Fence Table tranversed and no available space!\n"); + val = -EINVAL; + } else { + /* + * Linearly increment the hash value to find next element in the table + * note that this relies in the 'scrambled' data from the original hash + * Also, add a mod division to wrap-around in case that we reached the + * end of the table + */ + *hash = (*hash + 1) % m_size; + } + } + + return val; +} + +static inline struct msm_hw_fence *_get_hw_fence(u32 table_total_entries, + struct msm_hw_fence *hw_fences_tbl, + u64 hash) +{ + if (hash >= table_total_entries) { + HWFNC_ERR("hash:%llu out of max range:%llu\n", + hash, table_total_entries); + return NULL; + } + + return &hw_fences_tbl[hash]; +} + +static bool _is_hw_fence_free(struct msm_hw_fence *hw_fence, u64 context, u64 seqno) +{ + /* If valid is set, the hw fence is not free */ + return hw_fence->valid ? false : true; +} + +static bool _hw_fence_match(struct msm_hw_fence *hw_fence, u64 context, u64 seqno) +{ + return ((hw_fence->ctx_id == context && hw_fence->seq_id == seqno) ? 
true : false); +} + +/* clears everything but the 'valid' field */ +static void _cleanup_hw_fence(struct msm_hw_fence *hw_fence) +{ + int i; + + hw_fence->error = 0; + wmb(); /* update memory to avoid mem-abort */ + hw_fence->ctx_id = 0; + hw_fence->seq_id = 0; + hw_fence->wait_client_mask = 0; + hw_fence->fence_allocator = 0; + hw_fence->fence_signal_client = 0; + + hw_fence->flags = 0; + + hw_fence->fence_create_time = 0; + hw_fence->fence_trigger_time = 0; + hw_fence->fence_wait_time = 0; + hw_fence->debug_refcount = 0; + hw_fence->parents_cnt = 0; + hw_fence->pending_child_cnt = 0; + + for (i = 0; i < MSM_HW_FENCE_MAX_JOIN_PARENTS; i++) + hw_fence->parent_list[i] = HW_FENCE_INVALID_PARENT_FENCE; +} + +/* This function must be called with the hw fence lock */ +static void _reserve_hw_fence(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence *hw_fence, u32 client_id, + u64 context, u64 seqno, u32 hash, u32 pending_child_cnt) +{ + _cleanup_hw_fence(hw_fence); + + /* reserve this HW fence */ + hw_fence->valid = 1; + + hw_fence->ctx_id = context; + hw_fence->seq_id = seqno; + hw_fence->flags = 0; /* fence just reserved, there shouldn't be any flags set */ + hw_fence->fence_allocator = client_id; + hw_fence->fence_create_time = hw_fence_get_qtime(drv_data); + hw_fence->debug_refcount++; + + HWFNC_DBG_LUT("Reserved fence client:%d ctx:%llu seq:%llu hash:%llu\n", + client_id, context, seqno, hash); +} + +/* This function must be called with the hw fence lock */ +static void _unreserve_hw_fence(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence *hw_fence, u32 client_id, + u64 context, u64 seqno, u32 hash, u32 pending_child_cnt) +{ + _cleanup_hw_fence(hw_fence); + + /* unreserve this HW fence */ + hw_fence->valid = 0; + + HWFNC_DBG_LUT("Unreserved fence client:%d ctx:%llu seq:%llu hash:%llu\n", + client_id, context, seqno, hash); +} + +/* This function must be called with the hw fence lock */ +static void _reserve_join_fence(struct hw_fence_driver_data 
*drv_data, + struct msm_hw_fence *hw_fence, u32 client_id, u64 context, + u64 seqno, u32 hash, u32 pending_child_cnt) +{ + _cleanup_hw_fence(hw_fence); + + /* reserve this HW fence */ + hw_fence->valid = true; + + hw_fence->ctx_id = context; + hw_fence->seq_id = seqno; + hw_fence->fence_allocator = client_id; + hw_fence->fence_create_time = hw_fence_get_qtime(drv_data); + hw_fence->debug_refcount++; + + hw_fence->pending_child_cnt = pending_child_cnt; + + HWFNC_DBG_LUT("Reserved join fence client:%d ctx:%llu seq:%llu hash:%llu\n", + client_id, context, seqno, hash); +} + +/* This function must be called with the hw fence lock */ +static void _fence_found(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence *hw_fence, u32 client_id, + u64 context, u64 seqno, u32 hash, u32 pending_child_cnt) +{ + /* + * Do nothing, when this find fence fn is invoked, all processing is done outside. + * Currently just keeping this function for debugging purposes, can be removed + * in final versions + */ + HWFNC_DBG_LUT("Found fence client:%d ctx:%llu seq:%llu hash:%llu\n", + client_id, context, seqno, hash); +} + +char *_get_op_mode(enum hw_fence_lookup_ops op_code) +{ + switch (op_code) { + case HW_FENCE_LOOKUP_OP_CREATE: + return "CREATE"; + case HW_FENCE_LOOKUP_OP_DESTROY: + return "DESTROY"; + case HW_FENCE_LOOKUP_OP_CREATE_JOIN: + return "CREATE_JOIN"; + case HW_FENCE_LOOKUP_OP_FIND_FENCE: + return "FIND_FENCE"; + default: + return "UNKNOWN"; + } + + return "UNKNOWN"; +} + +struct msm_hw_fence *_hw_fence_lookup_and_process(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence *hw_fences_tbl, u64 context, u64 seqno, u32 client_id, + u32 pending_child_cnt, enum hw_fence_lookup_ops op_code, u64 *hash) +{ + bool (*compare_fnc)(struct msm_hw_fence *hfence, u64 context, u64 seqno); + void (*process_fnc)(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hfence, + u32 client_id, u64 context, u64 seqno, u32 hash, u32 pending); + struct msm_hw_fence *hw_fence = 
NULL; + u64 step = 0; + int ret = 0; + bool hw_fence_found = false; + + if (!hash | !drv_data | !hw_fences_tbl) { + HWFNC_ERR("Invalid input for hw_fence_lookup\n"); + return NULL; + } + + *hash = ~0; + + HWFNC_DBG_LUT("hw_fence_lookup: %d\n", op_code); + + switch (op_code) { + case HW_FENCE_LOOKUP_OP_CREATE: + compare_fnc = &_is_hw_fence_free; + process_fnc = &_reserve_hw_fence; + break; + case HW_FENCE_LOOKUP_OP_DESTROY: + compare_fnc = &_hw_fence_match; + process_fnc = &_unreserve_hw_fence; + break; + case HW_FENCE_LOOKUP_OP_CREATE_JOIN: + compare_fnc = &_is_hw_fence_free; + process_fnc = &_reserve_join_fence; + break; + case HW_FENCE_LOOKUP_OP_FIND_FENCE: + compare_fnc = &_hw_fence_match; + process_fnc = &_fence_found; + break; + default: + HWFNC_ERR("Unknown op code:%d\n", op_code); + return NULL; + } + + while (!hw_fence_found && (step < drv_data->hw_fence_table_entries)) { + + /* Calculate the Hash for the Fence */ + ret = _calculate_hash(drv_data->hw_fence_table_entries, context, seqno, step, hash); + if (ret) { + HWFNC_ERR("error calculating hash ctx:%llu seqno:%llu hash:%llu\n", + context, seqno, *hash); + break; + } + HWFNC_DBG_LUT("calculated hash:%llu [ctx:%llu seqno:%llu]\n", *hash, context, + seqno); + + /* Get element from the table using the hash */ + hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, hw_fences_tbl, *hash); + HWFNC_DBG_LUT("hw_fence_tbl:0x%pK hw_fence:0x%pK, hash:%llu valid:0x%x\n", + hw_fences_tbl, hw_fence, *hash, hw_fence ? 
hw_fence->valid : 0xbad); + if (!hw_fence) { + HWFNC_ERR("bad hw fence ctx:%llu seqno:%llu hash:%llu\n", + context, seqno, *hash); + break; + } + + GLOBAL_ATOMIC_STORE(&hw_fence->lock, 1); + + /* compare to either find a free fence or find an allocated fence */ + if (compare_fnc(hw_fence, context, seqno)) { + + /* Process the hw fence found by the algorithm */ + if (process_fnc) { + process_fnc(drv_data, hw_fence, client_id, context, seqno, *hash, + pending_child_cnt); + + /* update memory table with processing */ + wmb(); + } + + HWFNC_DBG_L("client_id:%lu op:%s ctx:%llu seqno:%llu hash:%llu step:%llu\n", + client_id, _get_op_mode(op_code), context, seqno, *hash, step); + + hw_fence_found = true; + } else { + if ((op_code == HW_FENCE_LOOKUP_OP_CREATE || + op_code == HW_FENCE_LOOKUP_OP_CREATE_JOIN) && + seqno == hw_fence->seq_id && context == hw_fence->ctx_id) { + /* ctx & seqno must be unique creating a hw-fence */ + HWFNC_ERR("cannot create hw fence with same ctx:%llu seqno:%llu\n", + context, seqno); + GLOBAL_ATOMIC_STORE(&hw_fence->lock, 0); + break; + } + /* compare can fail if we have a collision, we will linearly resolve it */ + HWFNC_DBG_H("compare failed for hash:%llu [ctx:%llu seqno:%llu]\n", *hash, + context, seqno); + } + + GLOBAL_ATOMIC_STORE(&hw_fence->lock, 0); + + /* Increment step for the next loop */ + step++; + } + + /* If we iterated through the whole list and didn't find the fence, return null */ + if (!hw_fence_found) { + HWFNC_ERR("fail to create hw-fence step:%llu\n", step); + hw_fence = NULL; + } + + HWFNC_DBG_LUT("lookup:%d hw_fence:%pK ctx:%llu seqno:%llu hash:%llu flags:0x%llx\n", + op_code, hw_fence, context, seqno, *hash, hw_fence ? 
hw_fence->flags : -1); + + return hw_fence; +} + +int hw_fence_create(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, + u64 context, u64 seqno, u64 *hash) +{ + u32 client_id = hw_fence_client->client_id; + struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl; + + int ret = 0; + + /* allocate hw fence in table */ + if (!_hw_fence_lookup_and_process(drv_data, hw_fences_tbl, + context, seqno, client_id, 0, HW_FENCE_LOOKUP_OP_CREATE, hash)) { + HWFNC_ERR("Fail to create fence client:%lu ctx:%llu seqno:%llu\n", + client_id, context, seqno); + ret = -EINVAL; + } + + return ret; +} + +static inline int _hw_fence_cleanup(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence *hw_fences_tbl, u32 client_id, u64 context, u64 seqno) { + u64 hash; + + if (!_hw_fence_lookup_and_process(drv_data, hw_fences_tbl, + context, seqno, client_id, 0, HW_FENCE_LOOKUP_OP_DESTROY, &hash)) + return -EINVAL; + + return 0; +} + +int hw_fence_destroy(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, + u64 context, u64 seqno) +{ + u32 client_id = hw_fence_client->client_id; + struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl; + int ret = 0; + + /* remove hw fence from table*/ + if (_hw_fence_cleanup(drv_data, hw_fences_tbl, client_id, context, seqno)) { + HWFNC_ERR("Fail destroying fence client:%lu ctx:%llu seqno:%llu\n", + client_id, context, seqno); + ret = -EINVAL; + } + + return ret; +} + +static struct msm_hw_fence *_hw_fence_process_join_fence(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, + struct dma_fence_array *array, u64 *hash, bool create) +{ + struct msm_hw_fence *hw_fences_tbl; + struct msm_hw_fence *join_fence = NULL; + u64 context, seqno; + u32 client_id, pending_child_cnt; + + /* + * NOTE: For now we are allocating the join fences from the same table as all + * the other fences (i.e. 
drv_data->hw_fences_tbl), functionally this will work, however, + * this might impact the lookup algorithm, since the "join-fences" are created with the + * context and seqno of a fence-array, and those might not be changing by the client, + * so this will linearly increment the look-up and very likely impact the other fences if + * these join-fences start to fill-up a particular region of the fences global table. + * So we might have to allocate a different table altogether for these join fences. + * However, to do this, just alloc another table and change it here: + */ + hw_fences_tbl = drv_data->hw_fences_tbl; + + context = array->base.context; + seqno = array->base.seqno; + pending_child_cnt = array->num_fences; + client_id = HW_FENCE_JOIN_FENCE_CLIENT_ID; + + if (create) { + /* allocate the fence */ + join_fence = _hw_fence_lookup_and_process(drv_data, hw_fences_tbl, context, + seqno, client_id, pending_child_cnt, HW_FENCE_LOOKUP_OP_CREATE_JOIN, hash); + if (!join_fence) + HWFNC_ERR("Fail to create join fence client:%lu ctx:%llu seqno:%llu\n", + client_id, context, seqno); + } else { + /* destroy the fence */ + if (_hw_fence_cleanup(drv_data, hw_fences_tbl, client_id, context, seqno)) + HWFNC_ERR("Fail destroying join fence client:%lu ctx:%llu seqno:%llu\n", + client_id, context, seqno); + } + + return join_fence; +} + +struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, + u64 context, u64 seqno, u64 *hash) +{ + struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl; + struct msm_hw_fence *hw_fence; + u32 client_id = hw_fence_client ? 
hw_fence_client->client_id : 0xff; + + /* find the hw fence */ + hw_fence = _hw_fence_lookup_and_process(drv_data, hw_fences_tbl, context, + seqno, client_id, 0, HW_FENCE_LOOKUP_OP_FIND_FENCE, hash); + if (!hw_fence) + HWFNC_ERR("Fail to find hw fence client:%lu ctx:%llu seqno:%llu\n", + client_id, context, seqno); + + return hw_fence; +} + +static void _fence_ctl_signal(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash, + u64 flags, u32 error) +{ + u32 tx_client_id = drv_data->ipcc_client_id; + u32 rx_client_id = hw_fence_client->ipc_client_id; + + HWFNC_DBG_H("We must signal the client now! hfence hash:%llu\n", hash); + + /* Write to Rx queue */ + if (hw_fence_client->update_rxq) + hw_fence_update_queue(drv_data, hw_fence_client, hw_fence->ctx_id, + hw_fence->seq_id, hash, flags, error, HW_FENCE_RX_QUEUE - 1); + + /* Signal the hw fence now */ + hw_fence_ipcc_trigger_signal(drv_data, tx_client_id, rx_client_id, + hw_fence_client->ipc_signal_id); +} + +static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, int iteration, struct dma_fence_array *array, + struct msm_hw_fence *join_fence, u64 hash_join_fence) +{ + struct dma_fence *child_fence; + struct msm_hw_fence *hw_fence_child; + int idx, j; + u64 hash = 0; + + /* cleanup the child-fences from the parent join-fence */ + for (idx = iteration; idx >= 0; idx--) { + child_fence = array->fences[idx]; + + hw_fence_child = msm_hw_fence_find(drv_data, hw_fence_client, child_fence->context, + child_fence->seqno, &hash); + if (!hw_fence_child) { + HWFNC_ERR("Cannot cleanup child fence context:%lu seqno:%lu hash:%lu\n", + child_fence->context, child_fence->seqno, hash); + + /* + * ideally this should not have happened, but if it did, try to keep + * cleaning-up other fences after printing the error + */ + continue; + } + + /* lock the child while we clean it up from the 
parent join-fence */ + GLOBAL_ATOMIC_STORE(&hw_fence_child->lock, 1); /* lock */ + for (j = hw_fence_child->parents_cnt; j > 0; j--) { + + if (j > MSM_HW_FENCE_MAX_JOIN_PARENTS) { + HWFNC_ERR("Invalid max parents_cnt:%d, will reset to max:%d\n", + hw_fence_child->parents_cnt, MSM_HW_FENCE_MAX_JOIN_PARENTS); + + j = MSM_HW_FENCE_MAX_JOIN_PARENTS; + } + + if (hw_fence_child->parent_list[j - 1] == hash_join_fence) { + hw_fence_child->parent_list[j - 1] = HW_FENCE_INVALID_PARENT_FENCE; + + if (hw_fence_child->parents_cnt) + hw_fence_child->parents_cnt--; + + /* update memory for the table update */ + wmb(); + } + } + GLOBAL_ATOMIC_STORE(&hw_fence_child->lock, 0); /* unlock */ + } + + /* destroy join fence */ + _hw_fence_process_join_fence(drv_data, hw_fence_client, array, &hash_join_fence, + false); +} + +int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, struct dma_fence_array *array) +{ + struct msm_hw_fence *join_fence; + struct msm_hw_fence *hw_fence_child; + struct dma_fence *child_fence; + u32 signaled_fences = 0; + u64 hash_join_fence, hash; + int i, ret = 0; + + /* + * Create join fence from the join-fences table, + * This function initializes: + * join_fence->pending_child_count = array->num_fences + */ + join_fence = _hw_fence_process_join_fence(drv_data, hw_fence_client, array, + &hash_join_fence, true); + if (!join_fence) { + HWFNC_ERR("cannot alloc hw fence for join fence array\n"); + return -EINVAL; + } + + /* update this as waiting client of the join-fence */ + GLOBAL_ATOMIC_STORE(&join_fence->lock, 1); /* lock */ + join_fence->wait_client_mask |= BIT(hw_fence_client->client_id); + GLOBAL_ATOMIC_STORE(&join_fence->lock, 0); /* unlock */ + + /* Iterate through fences of the array */ + for (i = 0; i < array->num_fences; i++) { + child_fence = array->fences[i]; + + /* Nested fence-arrays are not supported */ + if (to_dma_fence_array(child_fence)) { + HWFNC_ERR("This is a nested fence, 
fail!\n"); + ret = -EINVAL; + goto error_array; + } + + /* All elements in the fence-array must be hw-fences */ + if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &child_fence->flags)) { + HWFNC_ERR("DMA Fence in FenceArray is not a HW Fence\n"); + ret = -EINVAL; + goto error_array; + } + + /* Find the HW Fence in the Global Table */ + hw_fence_child = msm_hw_fence_find(drv_data, hw_fence_client, child_fence->context, + child_fence->seqno, &hash); + if (!hw_fence_child) { + HWFNC_ERR("Cannot find child fence context:%lu seqno:%lu hash:%lu\n", + child_fence->context, child_fence->seqno, hash); + ret = -EINVAL; + goto error_array; + } + + GLOBAL_ATOMIC_STORE(&hw_fence_child->lock, 1); /* lock */ + if (hw_fence_child->flags & MSM_HW_FENCE_FLAG_SIGNAL) { + + /* child fence is already signaled */ + GLOBAL_ATOMIC_STORE(&join_fence->lock, 1); /* lock */ + join_fence->pending_child_cnt--; + + /* update memory for the table update */ + wmb(); + + GLOBAL_ATOMIC_STORE(&join_fence->lock, 0); /* unlock */ + signaled_fences++; + } else { + + /* child fence is not signaled */ + hw_fence_child->parents_cnt++; + + if (hw_fence_child->parents_cnt >= MSM_HW_FENCE_MAX_JOIN_PARENTS + || hw_fence_child->parents_cnt < 1) { + + /* Max number of parents for a fence is exceeded */ + HWFNC_ERR("DMA Fence in FenceArray exceeds parents:%d\n", + hw_fence_child->parents_cnt); + hw_fence_child->parents_cnt--; + + /* update memory for the table update */ + wmb(); + + GLOBAL_ATOMIC_STORE(&hw_fence_child->lock, 0); /* unlock */ + ret = -EINVAL; + goto error_array; + } + + hw_fence_child->parent_list[hw_fence_child->parents_cnt - 1] = + hash_join_fence; + + /* update memory for the table update */ + wmb(); + } + GLOBAL_ATOMIC_STORE(&hw_fence_child->lock, 0); /* unlock */ + } + + /* all fences were signaled, signal client now */ + if (signaled_fences == array->num_fences) { + + /* signal the join hw fence */ + _fence_ctl_signal(drv_data, hw_fence_client, join_fence, hash_join_fence, 0, 0); + + /* + * job 
of the join-fence is finished since we already signaled, + * we can delete it now. This can happen when all the fences that + * are part of the join-fence are already signaled. + */ + _hw_fence_process_join_fence(drv_data, hw_fence_client, array, &hash_join_fence, + false); + } + + return ret; + +error_array: + _cleanup_join_and_child_fences(drv_data, hw_fence_client, i, array, join_fence, + hash_join_fence); + + return -EINVAL; +} + +int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno) +{ + struct msm_hw_fence *hw_fence; + u64 hash; + + /* find the hw fence within the table */ + hw_fence = msm_hw_fence_find(drv_data, hw_fence_client, context, seqno, &hash); + if (!hw_fence) { + HWFNC_ERR("Cannot find fence!\n"); + return -EINVAL; + } + + GLOBAL_ATOMIC_STORE(&hw_fence->lock, 1); /* lock */ + + /* register client in the hw fence */ + hw_fence->wait_client_mask |= BIT(hw_fence_client->client_id); + hw_fence->fence_wait_time = hw_fence_get_qtime(drv_data); + hw_fence->debug_refcount++; + + /* update memory for the table update */ + wmb(); + + /* if hw fence already signaled, signal the client */ + if (hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL) + _fence_ctl_signal(drv_data, hw_fence_client, hw_fence, hash, 0, 0); + + GLOBAL_ATOMIC_STORE(&hw_fence->lock, 0); /* unlock */ + + return 0; +} + +int hw_fence_process_fence(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, + struct dma_fence *fence) +{ + int ret = 0; + + if (!drv_data || !hw_fence_client || !fence) { + HWFNC_ERR("Invalid Input!\n"); + return -EINVAL; + } + /* fence must be hw-fence */ + if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) { + HWFNC_ERR("DMA Fence in is not a HW Fence flags:0x%llx\n", fence->flags); + return -EINVAL; + } + + ret = hw_fence_register_wait_client(drv_data, hw_fence_client, fence->context, + fence->seqno); + if (ret) + HWFNC_ERR("Error registering 
for wait client:%d\n", hw_fence_client->client_id); + + return ret; +} + +int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash, + u32 reset_flags) +{ + int ret = 0; + enum hw_fence_client_id wait_client_id; + struct msm_hw_fence_client *hw_fence_wait_client; + int error = (reset_flags & MSM_HW_FENCE_RESET_WITHOUT_ERROR) ? 0 : MSM_HW_FENCE_ERROR_RESET; + + GLOBAL_ATOMIC_STORE(&hw_fence->lock, 1); /* lock */ + if (hw_fence->wait_client_mask & BIT(hw_fence_client->client_id)) { + HWFNC_DBG_H("clearing client:%d wait bit for fence: ctx:%d seqno:%d\n", + hw_fence_client->client_id, hw_fence->ctx_id, + hw_fence->seq_id); + hw_fence->wait_client_mask &= ~BIT(hw_fence_client->client_id); + + /* update memory for the table update */ + wmb(); + } + GLOBAL_ATOMIC_STORE(&hw_fence->lock, 0); /* unlock */ + + if (hw_fence->fence_allocator == hw_fence_client->client_id) { + + /* signal with an error all the waiting clients for this fence */ + for (wait_client_id = 0; wait_client_id < HW_FENCE_CLIENT_MAX; wait_client_id++) { + if (hw_fence->wait_client_mask & BIT(wait_client_id)) { + hw_fence_wait_client = drv_data->clients[wait_client_id]; + + if (hw_fence_wait_client) + _fence_ctl_signal(drv_data, hw_fence_wait_client, hw_fence, + hash, 0, error); + } + } + + if (reset_flags & MSM_HW_FENCE_RESET_WITHOUT_DESTROY) + goto skip_destroy; + + ret = hw_fence_destroy(drv_data, hw_fence_client, + hw_fence->ctx_id, hw_fence->seq_id); + if (ret) { + HWFNC_ERR("Error destroying HW fence: ctx:%d seqno:%d\n", + hw_fence->ctx_id, hw_fence->seq_id); + } + } + +skip_destroy: + return ret; +} diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c new file mode 100644 index 0000000000..226df1e0cb --- /dev/null +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -0,0 +1,644 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Qualcomm Innovation Center, 
Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include + +#include "hw_fence_drv_priv.h" +#include "hw_fence_drv_utils.h" +#include "hw_fence_drv_ipc.h" +#include "hw_fence_drv_debug.h" + +static void _lock(uint64_t *wait) +{ + /* WFE Wait */ +#if defined(__aarch64__) + __asm__("SEVL\n\t" + "PRFM PSTL1KEEP, [%x[i_lock]]\n\t" + "1:\n\t" + "WFE\n\t" + "LDAXR W5, [%x[i_lock]]\n\t" + "CBNZ W5, 1b\n\t" + "STXR W5, W0, [%x[i_lock]]\n\t" + "CBNZ W5, 1b\n" + : + : [i_lock] "r" (wait) + : "memory"); +#endif +} + +static void _unlock(uint64_t *lock) +{ + /* Signal Client */ +#if defined(__aarch64__) + __asm__("STLR WZR, [%x[i_out]]\n\t" + "SEV\n" + : + : [i_out] "r" (lock) + : "memory"); +#endif +} + +void global_atomic_store(uint64_t *lock, bool val) +{ + if (val) + _lock(lock); + else + _unlock(lock); +} + +/* + * Each bit in this mask represents each of the loopback clients supported in + * the enum hw_fence_loopback_id + */ +#define HW_FENCE_LOOPBACK_CLIENTS_MASK 0x7f + +static inline int _process_dpu_client_loopback(struct hw_fence_driver_data *drv_data, + int client_id) +{ + int ctl_id = client_id; /* dpu ctl path id is mapped to client id used for the loopback */ + void *ctl_start_reg; + u32 val; + + if (ctl_id > HW_FENCE_LOOPBACK_DPU_CTL_5) { + HWFNC_ERR("invalid ctl_id:%d\n", ctl_id); + return -EINVAL; + } + + ctl_start_reg = drv_data->ctl_start_ptr[ctl_id]; + if (!ctl_start_reg) { + HWFNC_ERR("ctl_start reg not valid for ctl_id:%d\n", ctl_id); + return -EINVAL; + } + + HWFNC_DBG_H("Processing DPU loopback ctl_id:%d\n", ctl_id); + + val = 0x1; /* ctl_start trigger */ +#ifdef CTL_START_SIM + HWFNC_DBG_IRQ("ctl_id:%d Write: to RegOffset:0x%pK val:0x%x\n", ctl_id, ctl_start_reg, val); + writel_relaxed(val, ctl_start_reg); +#else + HWFNC_DBG_IRQ("ctl_id:%d Write: to RegOffset:0x%pK val:0x%x (COMMENTED)\n", ctl_id, + ctl_start_reg, val); +#endif + + return 0; +} + +static inline int _process_gfx_client_loopback(struct 
hw_fence_driver_data *drv_data, + int client_id) +{ + int queue_type = HW_FENCE_RX_QUEUE - 1; /* rx queue index */ + struct msm_hw_fence_queue_payload payload; + int read = 1; + + HWFNC_DBG_IRQ("Processing GFX loopback client_id:%d\n", client_id); + while (read) { + /* + * 'client_id' is the loopback-client-id, not the hw-fence client_id, + * so use GFX hw-fence client id, to get the client data + */ + read = hw_fence_read_queue(drv_data->clients[HW_FENCE_CLIENT_ID_CTX0], &payload, + queue_type); + if (read < 0) { + HWFNC_ERR("unable to read gfx rxq\n"); + break; + } + HWFNC_DBG_L("GFX loopback rxq read: hash:%llu ctx:%llu seq:%llu f:%llu e:%lu\n", + payload.hash, payload.ctxt_id, payload.seqno, payload.flags, payload.error); + } + + return read; +} + +static int _process_doorbell_client(struct hw_fence_driver_data *drv_data, int client_id) +{ + int ret; + + HWFNC_DBG_H("Processing loopback client_id:%d\n", client_id); + switch (client_id) { + case HW_FENCE_LOOPBACK_DPU_CTL_0: + case HW_FENCE_LOOPBACK_DPU_CTL_1: + case HW_FENCE_LOOPBACK_DPU_CTL_2: + case HW_FENCE_LOOPBACK_DPU_CTL_3: + case HW_FENCE_LOOPBACK_DPU_CTL_4: + case HW_FENCE_LOOPBACK_DPU_CTL_5: + ret = _process_dpu_client_loopback(drv_data, client_id); + break; + case HW_FENCE_LOOPBACK_GFX_CTX_0: + ret = _process_gfx_client_loopback(drv_data, client_id); + break; + default: + HWFNC_ERR("unknown client:%d\n", client_id); + ret = -EINVAL; + } + + return ret; +} + +void hw_fence_utils_process_doorbell_mask(struct hw_fence_driver_data *drv_data, u64 db_flags) +{ + int client_id = HW_FENCE_LOOPBACK_DPU_CTL_0; + u64 mask; + + for (; client_id < HW_FENCE_LOOPBACK_MAX; client_id++) { + mask = 1 << client_id; + if (mask & db_flags) { + HWFNC_DBG_H("client_id:%d signaled! 
flags:0x%llx\n", client_id, db_flags); + + /* process client */ + if (_process_doorbell_client(drv_data, client_id)) + HWFNC_ERR("Failed to process client:%d\n", client_id); + + /* clear mask for this client and if nothing else pending finish */ + db_flags = db_flags & ~(mask); + HWFNC_DBG_H("client_id:%d cleared flags:0x%llx mask:0x%llx ~mask:0x%llx\n", + client_id, db_flags, mask, ~(mask)); + if (!db_flags) + break; + } + } +} + +/* doorbell callback */ +static void _hw_fence_cb(int irq, void *data) +{ + struct hw_fence_driver_data *drv_data = (struct hw_fence_driver_data *)data; + gh_dbl_flags_t clear_flags = HW_FENCE_LOOPBACK_CLIENTS_MASK; + int ret; + + if (!drv_data) + return; + + ret = gh_dbl_read_and_clean(drv_data->rx_dbl, &clear_flags, 0); + if (ret) { + HWFNC_ERR("hw_fence db callback, retrieve flags fail ret:%d\n", ret); + return; + } + + HWFNC_DBG_IRQ("db callback label:%d irq:%d flags:0x%llx qtime:%llu\n", drv_data->db_label, + irq, clear_flags, hw_fence_get_qtime(drv_data)); + + hw_fence_utils_process_doorbell_mask(drv_data, clear_flags); +} + +int hw_fence_utils_init_virq(struct hw_fence_driver_data *drv_data) +{ + struct device_node *node = drv_data->dev->of_node; + struct device_node *node_compat; + const char *compat = "qcom,msm-hw-fence-db"; + int ret; + + node_compat = of_find_compatible_node(node, NULL, compat); + if (!node_compat) { + HWFNC_ERR("Failed to find dev node with compat:%s\n", compat); + return -EINVAL; + } + + ret = of_property_read_u32(node_compat, "gunyah-label", &drv_data->db_label); + if (ret) { + HWFNC_ERR("failed to find label info %d\n", ret); + return ret; + } + + HWFNC_DBG_IRQ("registering doorbell db_label:%d\n", drv_data->db_label); + drv_data->rx_dbl = gh_dbl_rx_register(drv_data->db_label, _hw_fence_cb, drv_data); + if (IS_ERR_OR_NULL(drv_data->rx_dbl)) { + ret = PTR_ERR(drv_data->rx_dbl); + HWFNC_ERR("Failed to register doorbell\n"); + return ret; + } + + return 0; +} + +static int hw_fence_gunyah_share_mem(struct 
hw_fence_driver_data *drv_data, + gh_vmid_t self, gh_vmid_t peer) +{ + u32 src_vmlist[1] = {self}; + int src_perms[2] = {PERM_READ | PERM_WRITE | PERM_EXEC}; + int dst_vmlist[2] = {self, peer}; + int dst_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ | PERM_WRITE}; + struct gh_acl_desc *acl; + struct gh_sgl_desc *sgl; + int ret; + + ret = hyp_assign_phys(drv_data->res.start, resource_size(&drv_data->res), + src_vmlist, 1, dst_vmlist, dst_perms, 2); + if (ret) { + HWFNC_ERR("%s: hyp_assign_phys failed addr=%x size=%u err=%d\n", + __func__, drv_data->res.start, drv_data->size, ret); + return ret; + } + + acl = kzalloc(offsetof(struct gh_acl_desc, acl_entries[2]), GFP_KERNEL); + if (!acl) + return -ENOMEM; + sgl = kzalloc(offsetof(struct gh_sgl_desc, sgl_entries[1]), GFP_KERNEL); + if (!sgl) { + kfree(acl); + return -ENOMEM; + } + acl->n_acl_entries = 2; + acl->acl_entries[0].vmid = (u16)self; + acl->acl_entries[0].perms = GH_RM_ACL_R | GH_RM_ACL_W; + acl->acl_entries[1].vmid = (u16)peer; + acl->acl_entries[1].perms = GH_RM_ACL_R | GH_RM_ACL_W; + + sgl->n_sgl_entries = 1; + sgl->sgl_entries[0].ipa_base = drv_data->res.start; + sgl->sgl_entries[0].size = resource_size(&drv_data->res); + + ret = gh_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, 0, drv_data->label, + acl, sgl, NULL, &drv_data->memparcel); + if (ret) { + HWFNC_ERR("%s: gh_rm_mem_share failed addr=%x size=%u err=%d\n", + __func__, drv_data->res.start, drv_data->size, ret); + /* Attempt to give resource back to HLOS */ + hyp_assign_phys(drv_data->res.start, resource_size(&drv_data->res), + dst_vmlist, 2, + src_vmlist, src_perms, 1); + ret = -EPROBE_DEFER; + } + + kfree(acl); + kfree(sgl); + + return ret; +} + +static int hw_fence_rm_cb(struct notifier_block *nb, unsigned long cmd, void *data) +{ + struct gh_rm_notif_vm_status_payload *vm_status_payload; + struct hw_fence_driver_data *drv_data; + gh_vmid_t peer_vmid; + gh_vmid_t self_vmid; + + drv_data = container_of(nb, struct hw_fence_driver_data, rm_nb); + + 
HWFNC_DBG_INIT("cmd:0x%lx ++\n", cmd); + if (cmd != GH_RM_NOTIF_VM_STATUS) + goto end; + + vm_status_payload = data; + HWFNC_DBG_INIT("payload vm_status:%d\n", vm_status_payload->vm_status); + if (vm_status_payload->vm_status != GH_RM_VM_STATUS_READY && + vm_status_payload->vm_status != GH_RM_VM_STATUS_RESET) + goto end; + + if (gh_rm_get_vmid(drv_data->peer_name, &peer_vmid)) + goto end; + + if (gh_rm_get_vmid(GH_PRIMARY_VM, &self_vmid)) + goto end; + + if (peer_vmid != vm_status_payload->vmid) + goto end; + + switch (vm_status_payload->vm_status) { + case GH_RM_VM_STATUS_READY: + HWFNC_DBG_INIT("init mem\n"); + if (hw_fence_gunyah_share_mem(drv_data, self_vmid, peer_vmid)) + HWFNC_ERR("failed to share memory\n"); + break; + case GH_RM_VM_STATUS_RESET: + HWFNC_DBG_INIT("reset\n"); + break; + } + +end: + return NOTIFY_DONE; +} + +/* Allocates carved-out mapped memory */ +int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *drv_data) +{ + struct device_node *node = drv_data->dev->of_node; + struct device_node *node_compat; + const char *compat = "qcom,msm-hw-fence-mem"; + struct device *dev = drv_data->dev; + struct device_node *np; + int notifier_ret, ret; + + node_compat = of_find_compatible_node(node, NULL, compat); + if (!node_compat) { + HWFNC_ERR("Failed to find dev node with compat:%s\n", compat); + return -EINVAL; + } + + ret = of_property_read_u32(node_compat, "gunyah-label", &drv_data->label); + if (ret) { + HWFNC_ERR("failed to find label info %d\n", ret); + return ret; + } + + np = of_parse_phandle(node_compat, "shared-buffer", 0); + if (!np) { + HWFNC_ERR("failed to read shared-buffer info\n"); + return -ENOMEM; + } + + ret = of_address_to_resource(np, 0, &drv_data->res); + of_node_put(np); + if (ret) { + HWFNC_ERR("of_address_to_resource failed %d\n", ret); + return -EINVAL; + } + + drv_data->io_mem_base = devm_ioremap(dev, drv_data->res.start, + resource_size(&drv_data->res)); + if (!drv_data->io_mem_base) { + HWFNC_ERR("ioremap failed!\n"); + 
return -ENXIO; + } + drv_data->size = resource_size(&drv_data->res); + + HWFNC_DBG_INIT("io_mem_base:0x%x start:0x%x end:0x%x size:0x%x name:%s\n", + drv_data->io_mem_base, drv_data->res.start, + drv_data->res.end, drv_data->size, drv_data->res.name); + + memset_io(drv_data->io_mem_base, 0x0, drv_data->size); + + /* Register memory with HYP */ + ret = of_property_read_u32(node_compat, "peer-name", &drv_data->peer_name); + if (ret) + drv_data->peer_name = GH_SELF_VM; + + drv_data->rm_nb.notifier_call = hw_fence_rm_cb; + drv_data->rm_nb.priority = INT_MAX; + notifier_ret = gh_rm_register_notifier(&drv_data->rm_nb); + HWFNC_DBG_INIT("notifier: ret:%d peer_name:%d notifier_ret:%d\n", ret, + drv_data->peer_name, notifier_ret); + if (notifier_ret) { + HWFNC_ERR("fail to register notifier ret:%d\n", notifier_ret); + return -EPROBE_DEFER; + } + + return 0; +} + +char *_get_mem_reserve_type(enum hw_fence_mem_reserve type) +{ + switch (type) { + case HW_FENCE_MEM_RESERVE_CTRL_QUEUE: + return "HW_FENCE_MEM_RESERVE_CTRL_QUEUE"; + case HW_FENCE_MEM_RESERVE_LOCKS_REGION: + return "HW_FENCE_MEM_RESERVE_LOCKS_REGION"; + case HW_FENCE_MEM_RESERVE_TABLE: + return "HW_FENCE_MEM_RESERVE_TABLE"; + case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: + return "HW_FENCE_MEM_RESERVE_CLIENT_QUEUE"; + } + + return "Unknown"; +} + +/* Calculates the memory range for each of the elements in the carved-out memory */ +int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data, + enum hw_fence_mem_reserve type, phys_addr_t *phys, void **pa, u32 *size, int client_id) +{ + int ret = 0; + u32 start_offset = 0; + + switch (type) { + case HW_FENCE_MEM_RESERVE_CTRL_QUEUE: + start_offset = 0; + *size = drv_data->hw_fence_mem_ctrl_queues_size; + break; + case HW_FENCE_MEM_RESERVE_LOCKS_REGION: + /* Locks region starts at the end of the ctrl queues */ + start_offset = drv_data->hw_fence_mem_ctrl_queues_size; + *size = HW_FENCE_MEM_LOCKS_SIZE; + break; + case HW_FENCE_MEM_RESERVE_TABLE: + /* HW Fence table 
starts at the end of the Locks region */ + start_offset = drv_data->hw_fence_mem_ctrl_queues_size + HW_FENCE_MEM_LOCKS_SIZE; + *size = drv_data->hw_fence_mem_fences_table_size; + break; + case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: + if (client_id >= HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("unexpected client_id:%d\n", client_id); + ret = -EINVAL; + goto exit; + } + + start_offset = PAGE_ALIGN(drv_data->hw_fence_mem_ctrl_queues_size + + HW_FENCE_MEM_LOCKS_SIZE + + drv_data->hw_fence_mem_fences_table_size) + + ((client_id - 1) * drv_data->hw_fence_mem_clients_queues_size); + *size = drv_data->hw_fence_mem_clients_queues_size; + + break; + default: + HWFNC_ERR("Invalid mem reserve type:%d\n", type); + ret = -EINVAL; + break; + } + + if (start_offset + *size > drv_data->size) { + HWFNC_ERR("reservation request:%lu exceeds total size:%d\n", + start_offset + *size, drv_data->size); + return -ENOMEM; + } + + HWFNC_DBG_INIT("type:%s (%d) io_mem_base:0x%x start:0x%x start_offset:%lu size:0x%x\n", + _get_mem_reserve_type(type), type, drv_data->io_mem_base, drv_data->res.start, + start_offset, *size); + + + *phys = drv_data->res.start + (phys_addr_t)start_offset; + *pa = (drv_data->io_mem_base + start_offset); /* offset is in bytes */ + HWFNC_DBG_H("phys:0x%x pa:0x%pK\n", *phys, *pa); + +exit: + return ret; +} + +int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data) +{ + int ret; + u32 val = 0; + + ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-table-entries", &val); + if (ret || !val) { + HWFNC_ERR("missing hw fences table entry or invalid ret:%d val:%d\n", ret, val); + return ret; + } + drv_data->hw_fence_table_entries = val; + + if (drv_data->hw_fence_table_entries >= U32_MAX / sizeof(struct msm_hw_fence)) { + HWFNC_ERR("table entries:%lu will overflow table size\n", + drv_data->hw_fence_table_entries); + return -EINVAL; + } + drv_data->hw_fence_mem_fences_table_size = (sizeof(struct msm_hw_fence) * + drv_data->hw_fence_table_entries); + + ret 
= of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-queue-entries", &val); + if (ret || !val) { + HWFNC_ERR("missing queue entries table entry or invalid ret:%d val:%d\n", ret, val); + return ret; + } + drv_data->hw_fence_queue_entries = val; + + /* ctrl queues init */ + + if (drv_data->hw_fence_queue_entries >= U32_MAX / HW_FENCE_CTRL_QUEUE_PAYLOAD) { + HWFNC_ERR("queue entries:%lu will overflow ctrl queue size\n", + drv_data->hw_fence_queue_entries); + return -EINVAL; + } + drv_data->hw_fence_ctrl_queue_size = HW_FENCE_CTRL_QUEUE_PAYLOAD * + drv_data->hw_fence_queue_entries; + + if (drv_data->hw_fence_ctrl_queue_size >= (U32_MAX - HW_FENCE_HFI_CTRL_HEADERS_SIZE) / + HW_FENCE_CTRL_QUEUES) { + HWFNC_ERR("queue size:%lu will overflow ctrl queue mem size\n", + drv_data->hw_fence_ctrl_queue_size); + return -EINVAL; + } + drv_data->hw_fence_mem_ctrl_queues_size = HW_FENCE_HFI_CTRL_HEADERS_SIZE + + (HW_FENCE_CTRL_QUEUES * drv_data->hw_fence_ctrl_queue_size); + + /* clients queues init */ + + if (drv_data->hw_fence_queue_entries >= U32_MAX / HW_FENCE_CLIENT_QUEUE_PAYLOAD) { + HWFNC_ERR("queue entries:%lu will overflow client queue size\n", + drv_data->hw_fence_queue_entries); + return -EINVAL; + } + drv_data->hw_fence_client_queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * + drv_data->hw_fence_queue_entries; + + if (drv_data->hw_fence_client_queue_size >= ((U32_MAX & PAGE_MASK) - + HW_FENCE_HFI_CLIENT_HEADERS_SIZE) / HW_FENCE_CLIENT_QUEUES) { + HWFNC_ERR("queue size:%lu will overflow client queue mem size\n", + drv_data->hw_fence_client_queue_size); + return -EINVAL; + } + drv_data->hw_fence_mem_clients_queues_size = PAGE_ALIGN(HW_FENCE_HFI_CLIENT_HEADERS_SIZE + + (HW_FENCE_CLIENT_QUEUES * drv_data->hw_fence_client_queue_size)); + + HWFNC_DBG_INIT("table: entries=%lu mem_size=%lu queue: entries=%lu\n", + drv_data->hw_fence_table_entries, drv_data->hw_fence_mem_fences_table_size, + drv_data->hw_fence_queue_entries); + HWFNC_DBG_INIT("ctrl queue: size=%lu 
mem_size=%lu clients queues: size=%lu mem_size=%lu\n", + drv_data->hw_fence_ctrl_queue_size, drv_data->hw_fence_mem_ctrl_queues_size, + drv_data->hw_fence_client_queue_size, drv_data->hw_fence_mem_clients_queues_size); + + return 0; +} + +int hw_fence_utils_map_ipcc(struct hw_fence_driver_data *drv_data) +{ + int ret; + u32 reg_config[2]; + void __iomem *ptr; + + /* Get ipcc memory range */ + ret = of_property_read_u32_array(drv_data->dev->of_node, "qcom,ipcc-reg", + reg_config, 2); + if (ret) { + HWFNC_ERR("failed to read ipcc reg: %d\n", ret); + return ret; + } + drv_data->ipcc_reg_base = reg_config[0]; + drv_data->ipcc_size = reg_config[1]; + + /* Mmap ipcc registers */ + ptr = devm_ioremap(drv_data->dev, drv_data->ipcc_reg_base, drv_data->ipcc_size); + if (!ptr) { + HWFNC_ERR("failed to ioremap ipcc regs\n"); + return -ENOMEM; + } + drv_data->ipcc_io_mem = ptr; + + HWFNC_DBG_H("mapped address:0x%x size:0x%x io_mem:0x%pK\n", + drv_data->ipcc_reg_base, drv_data->ipcc_size, + drv_data->ipcc_io_mem); + + hw_fence_ipcc_enable_signaling(drv_data); + + return ret; +} + +int hw_fence_utils_map_qtime(struct hw_fence_driver_data *drv_data) +{ + int ret = 0; + unsigned int reg_config[2]; + void __iomem *ptr; + + ret = of_property_read_u32_array(drv_data->dev->of_node, "qcom,qtime-reg", + reg_config, 2); + if (ret) { + HWFNC_ERR("failed to read qtimer reg: %d\n", ret); + return ret; + } + + drv_data->qtime_reg_base = reg_config[0]; + drv_data->qtime_size = reg_config[1]; + + ptr = devm_ioremap(drv_data->dev, drv_data->qtime_reg_base, drv_data->qtime_size); + if (!ptr) { + HWFNC_ERR("failed to ioremap qtime regs\n"); + return -ENOMEM; + } + + drv_data->qtime_io_mem = ptr; + + return ret; +} + +static int _map_ctl_start(struct hw_fence_driver_data *drv_data, u32 ctl_id, + void **iomem_ptr, uint32_t *iomem_size) +{ + u32 reg_config[2]; + void __iomem *ptr; + char name[30] = {0}; + int ret; + + snprintf(name, sizeof(name), "qcom,dpu-ctl-start-%d-reg", ctl_id); + ret = 
of_property_read_u32_array(drv_data->dev->of_node, name, reg_config, 2); + if (ret) + return 0; /* this is an optional property */ + + /* Mmap registers */ + ptr = devm_ioremap(drv_data->dev, reg_config[0], reg_config[1]); + if (!ptr) { + HWFNC_ERR("failed to ioremap %s reg\n", name); + return -ENOMEM; + } + + *iomem_ptr = ptr; + *iomem_size = reg_config[1]; + + HWFNC_DBG_INIT("mapped ctl_start ctl_id:%d name:%s address:0x%x size:0x%x io_mem:0x%pK\n", + ctl_id, name, reg_config[0], reg_config[1], ptr); + + return 0; +} + +int hw_fence_utils_map_ctl_start(struct hw_fence_driver_data *drv_data) +{ + u32 ctl_id = HW_FENCE_LOOPBACK_DPU_CTL_0; + + for (; ctl_id <= HW_FENCE_LOOPBACK_DPU_CTL_5; ctl_id++) { + if (_map_ctl_start(drv_data, ctl_id, &drv_data->ctl_start_ptr[ctl_id], + &drv_data->ctl_start_size[ctl_id])) { + HWFNC_ERR("cannot map ctl_start ctl_id:%d\n", ctl_id); + } else { + if (drv_data->ctl_start_ptr[ctl_id]) + HWFNC_DBG_INIT("mapped ctl_id:%d ctl_start_ptr:0x%pK size:%u\n", + ctl_id, drv_data->ctl_start_ptr[ctl_id], + drv_data->ctl_start_size[ctl_id]); + } + } + + return 0; +} diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c new file mode 100644 index 0000000000..e2e61947c4 --- /dev/null +++ b/hw_fence/src/msm_hw_fence.c @@ -0,0 +1,486 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include + +#include "hw_fence_drv_priv.h" +#include "hw_fence_drv_utils.h" +#include "hw_fence_drv_debug.h" +#include "hw_fence_drv_ipc.h" + +struct hw_fence_driver_data *hw_fence_drv_data; + +void *msm_hw_fence_register(enum hw_fence_client_id client_id, + struct msm_hw_fence_mem_addr *mem_descriptor) +{ + struct msm_hw_fence_client *hw_fence_client; + int ret; + + HWFNC_DBG_H("++ client_id:%d\n", client_id); + + if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { + HWFNC_ERR("hw fence driver not ready\n"); + return ERR_PTR(-EAGAIN); + } + + if (!mem_descriptor || client_id >= HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("Invalid params: %d client_id:%d\n", + !mem_descriptor, client_id); + return ERR_PTR(-EINVAL); + } + + /* Avoid race condition if multiple-threads request same client at same time */ + mutex_lock(&hw_fence_drv_data->clients_mask_lock); + if (hw_fence_drv_data->client_id_mask & BIT(client_id)) { + HWFNC_ERR("client with id %d already registered\n", client_id); + mutex_unlock(&hw_fence_drv_data->clients_mask_lock); + return ERR_PTR(-EINVAL); + } + + /* Mark client as registered */ + hw_fence_drv_data->client_id_mask |= BIT(client_id); + mutex_unlock(&hw_fence_drv_data->clients_mask_lock); + + /* Alloc client handle */ + hw_fence_client = kzalloc(sizeof(*hw_fence_client), GFP_KERNEL); + if (!hw_fence_client) { + mutex_lock(&hw_fence_drv_data->clients_mask_lock); + hw_fence_drv_data->client_id_mask &= ~BIT(client_id); + mutex_unlock(&hw_fence_drv_data->clients_mask_lock); + return ERR_PTR(-ENOMEM); + } + hw_fence_client->client_id = client_id; + hw_fence_client->ipc_client_id = hw_fence_ipcc_get_client_id(hw_fence_drv_data, client_id); + + if (hw_fence_client->ipc_client_id <= 0) { + HWFNC_ERR("Failed to find client:%d ipc id\n", client_id); + ret = -EINVAL; + goto error; + } + + hw_fence_client->ipc_signal_id = hw_fence_ipcc_get_signal_id(hw_fence_drv_data, client_id); + if 
(hw_fence_client->ipc_signal_id < 0) { + HWFNC_ERR("Failed to find client:%d signal\n", client_id); + ret = -EINVAL; + goto error; + } + + hw_fence_client->update_rxq = hw_fence_ipcc_needs_rxq_update(hw_fence_drv_data, client_id); + + hw_fence_drv_data->clients[client_id] = hw_fence_client; + + /* Alloc Client HFI Headers and Queues */ + ret = hw_fence_alloc_client_resources(hw_fence_drv_data, + hw_fence_client, mem_descriptor); + if (ret) + goto error; + + /* Initialize signal for communication with the FenceCTL */ + ret = hw_fence_init_controller_signal(hw_fence_drv_data, hw_fence_client); + if (ret) + goto error; + + /* + * Update Fence Controller with the address of the Queues and + * the Fences Tables for this client + */ + ret = hw_fence_init_controller_resources(hw_fence_client); + if (ret) + goto error; + + HWFNC_DBG_INIT("-- Initialized ptr:0x%p client_id:%d ipc_signal_id:%d ipc_client_id:%d\n", + hw_fence_client, hw_fence_client->client_id, hw_fence_client->ipc_signal_id, + hw_fence_client->ipc_client_id); + + return (void *)hw_fence_client; +error: + + /* Free all the allocated resources */ + hw_fence_cleanup_client(hw_fence_drv_data, hw_fence_client); + + HWFNC_ERR("failed with error:%d\n", ret); + return ERR_PTR(ret); +} +EXPORT_SYMBOL(msm_hw_fence_register); + +int msm_hw_fence_deregister(void *client_handle) +{ + struct msm_hw_fence_client *hw_fence_client; + + if (IS_ERR_OR_NULL(client_handle)) { + HWFNC_ERR("Invalid client handle\n"); + return -EINVAL; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + + HWFNC_DBG_H("+\n"); + + /* Free all the allocated resources */ + hw_fence_cleanup_client(hw_fence_drv_data, hw_fence_client); + + HWFNC_DBG_H("-\n"); + + return 0; +} +EXPORT_SYMBOL(msm_hw_fence_deregister); + +int msm_hw_fence_create(void *client_handle, + struct msm_hw_fence_create_params *params) +{ + struct msm_hw_fence_client *hw_fence_client; + struct dma_fence_array *array; + struct dma_fence *fence; + int ret; + + if 
(IS_ERR_OR_NULL(client_handle) || !params || !params->fence || !params->handle) { + HWFNC_ERR("Invalid input\n"); + return -EINVAL; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + fence = (struct dma_fence *)params->fence; + + HWFNC_DBG_H("+\n"); + + /* Block any Fence-Array, we should only get individual fences */ + array = to_dma_fence_array(fence); + if (array) { + HWFNC_ERR("HW Fence must be created for individual fences\n"); + return -EINVAL; + } + + /* This Fence is already a HW-Fence */ + if (test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) { + HWFNC_ERR("DMA Fence already has HW Fence Flag set\n"); + return -EINVAL; + } + + /* Create the HW Fence, i.e. add entry in the Global Table for this Fence */ + ret = hw_fence_create(hw_fence_drv_data, hw_fence_client, + fence->context, fence->seqno, params->handle); + if (ret) { + HWFNC_ERR("Error creating HW fence\n"); + return ret; + } + + /* If no error, set the HW Fence Flag in the dma-fence */ + set_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags); + + HWFNC_DBG_H("-\n"); + + return 0; +} +EXPORT_SYMBOL(msm_hw_fence_create); + +int msm_hw_fence_destroy(void *client_handle, + struct dma_fence *fence) +{ + struct msm_hw_fence_client *hw_fence_client; + struct dma_fence_array *array; + int ret; + + if (IS_ERR_OR_NULL(client_handle) || !fence) { + HWFNC_ERR("Invalid data\n"); + return -EINVAL; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + + HWFNC_DBG_H("+\n"); + + /* Block any Fence-Array, we should only get individual fences */ + array = to_dma_fence_array(fence); + if (array) { + HWFNC_ERR("HW Fence must be destroy for individual fences\n"); + return -EINVAL; + } + + /* This Fence not a HW-Fence */ + if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) { + HWFNC_ERR("DMA Fence is not a HW Fence flags:0x%llx\n", fence->flags); + return -EINVAL; + } + + /* Destroy the HW Fence, i.e. 
remove entry in the Global Table for the Fence */ + ret = hw_fence_destroy(hw_fence_drv_data, hw_fence_client, + fence->context, fence->seqno); + if (ret) { + HWFNC_ERR("Error destroying the HW fence\n"); + return ret; + } + + /* Clear the HW Fence Flag in the dma-fence */ + clear_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags); + + HWFNC_DBG_H("-\n"); + + return 0; +} +EXPORT_SYMBOL(msm_hw_fence_destroy); + +int msm_hw_fence_wait_update(void *client_handle, + struct dma_fence **fence_list, u32 num_fences, bool create) +{ + struct msm_hw_fence_client *hw_fence_client; + struct dma_fence_array *array; + int i, ret = 0; + + if (IS_ERR_OR_NULL(client_handle) || !fence_list || !*fence_list) { + HWFNC_ERR("Invalid data\n"); + return -EINVAL; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + + HWFNC_DBG_H("+\n"); + + /* Process all the list of fences */ + for (i = 0; i < num_fences; i++) { + struct dma_fence *fence = fence_list[i]; + + /* Process a Fence-Array */ + array = to_dma_fence_array(fence); + if (array) { + ret = hw_fence_process_fence_array(hw_fence_drv_data, hw_fence_client, + array); + if (ret) { + HWFNC_ERR("Failed to create FenceArray\n"); + return ret; + } + } else { + /* Process individual Fence */ + ret = hw_fence_process_fence(hw_fence_drv_data, hw_fence_client, fence); + if (ret) { + HWFNC_ERR("Failed to create Fence\n"); + return ret; + } + } + } + + HWFNC_DBG_H("-\n"); + + return 0; +} +EXPORT_SYMBOL(msm_hw_fence_wait_update); + +int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags) +{ + struct msm_hw_fence_client *hw_fence_client; + struct msm_hw_fence *hw_fences_tbl; + int i; + + if (IS_ERR_OR_NULL(client_handle)) { + HWFNC_ERR("Invalid client handle!\n"); + return -EINVAL; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + hw_fences_tbl = hw_fence_drv_data->hw_fences_tbl; + + HWFNC_DBG_L("reset fences for client:%d\n", hw_fence_client->client_id); + for (i = 0; i < 
hw_fence_drv_data->hw_fences_tbl_cnt; i++) + hw_fence_utils_cleanup_fence(hw_fence_drv_data, hw_fence_client, + &hw_fences_tbl[i], i, reset_flags); + + return 0; +} +EXPORT_SYMBOL(msm_hw_fence_reset_client); + +int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 error) +{ + struct msm_hw_fence_client *hw_fence_client; + + if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { + HWFNC_ERR("hw fence driver not ready\n"); + return -EAGAIN; + } else if (IS_ERR_OR_NULL(client_handle) || + (handle >= hw_fence_drv_data->hw_fences_tbl_cnt)) { + HWFNC_ERR("Invalid handle:%d or client handle:%d max:%d\n", handle, + IS_ERR_OR_NULL(client_handle), hw_fence_drv_data->hw_fences_tbl_cnt); + return -EINVAL; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + + /* Write to Tx queue */ + hw_fence_update_queue(hw_fence_drv_data, hw_fence_client, + hw_fence_drv_data->hw_fences_tbl[handle].ctx_id, + hw_fence_drv_data->hw_fences_tbl[handle].seq_id, handle, + flags, error, HW_FENCE_TX_QUEUE - 1); + + return 0; +} +EXPORT_SYMBOL(msm_hw_fence_update_txq); + +int msm_hw_fence_trigger_signal(void *client_handle, + u32 tx_client_id, u32 rx_client_id, + u32 signal_id) +{ + struct msm_hw_fence_client *hw_fence_client; + + if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { + HWFNC_ERR("hw fence driver not ready\n"); + return -EAGAIN; + } else if (IS_ERR_OR_NULL(client_handle)) { + HWFNC_ERR("Invalid client\n"); + return -EINVAL; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + + HWFNC_DBG_H("sending ipc for client:%d\n", hw_fence_client->client_id); + hw_fence_ipcc_trigger_signal(hw_fence_drv_data, tx_client_id, + rx_client_id, signal_id); + + return 0; +} +EXPORT_SYMBOL(msm_hw_fence_trigger_signal); + +/* Function used for simulation purposes only. 
 */ +int msm_hw_fence_driver_doorbell_sim(u64 db_mask) +{ + if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { + HWFNC_ERR("hw fence driver not ready\n"); + return -EAGAIN; + } + + HWFNC_DBG_IRQ("db callback sim-mode flags:0x%llx qtime:%llu\n", + db_mask, hw_fence_get_qtime(hw_fence_drv_data)); + + hw_fence_utils_process_doorbell_mask(hw_fence_drv_data, db_mask); + + return 0; +} +EXPORT_SYMBOL(msm_hw_fence_driver_doorbell_sim); + +static int msm_hw_fence_probe_init(struct platform_device *pdev) +{ + int rc; + + HWFNC_DBG_H("+\n"); + + hw_fence_drv_data = kzalloc(sizeof(*hw_fence_drv_data), GFP_KERNEL); + if (!hw_fence_drv_data) + return -ENOMEM; + + dev_set_drvdata(&pdev->dev, hw_fence_drv_data); + hw_fence_drv_data->dev = &pdev->dev; + + /* Initialize HW Fence Driver resources */ + rc = hw_fence_init(hw_fence_drv_data); + if (rc) + goto error; + + mutex_init(&hw_fence_drv_data->clients_mask_lock); + + /* set ready value so clients can register */ + hw_fence_drv_data->resources_ready = true; + + HWFNC_DBG_H("-\n"); + + return rc; + +error: + dev_set_drvdata(&pdev->dev, NULL); + kfree(hw_fence_drv_data); + hw_fence_drv_data = (void *) -EPROBE_DEFER; + + HWFNC_ERR("error %d\n", rc); + return rc; +} + +static int msm_hw_fence_probe(struct platform_device *pdev) +{ + int rc = -EINVAL; + + HWFNC_DBG_H("+\n"); + + if (!pdev) { + HWFNC_ERR("null platform dev\n"); + return -EINVAL; + } + + if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-hw-fence")) + rc = msm_hw_fence_probe_init(pdev); + if (rc) + goto err_exit; + + HWFNC_DBG_H("-\n"); + + return 0; + +err_exit: + HWFNC_ERR("error %d\n", rc); + return rc; +} + +static int msm_hw_fence_remove(struct platform_device *pdev) +{ + HWFNC_DBG_H("+\n"); + + if (!pdev) { + HWFNC_ERR("null platform dev\n"); + return -EINVAL; + } + + hw_fence_drv_data = dev_get_drvdata(&pdev->dev); + if (!hw_fence_drv_data) { + HWFNC_ERR("null driver data\n"); + return -EINVAL; + } + + dev_set_drvdata(&pdev->dev, 
NULL); + kfree(hw_fence_drv_data); + hw_fence_drv_data = (void *) -EPROBE_DEFER; + + HWFNC_DBG_H("-\n"); + + return 0; +} + +static const struct of_device_id msm_hw_fence_dt_match[] = { + {.compatible = "qcom,msm-hw-fence"}, + {} +}; + +static struct platform_driver msm_hw_fence_driver = { + .probe = msm_hw_fence_probe, + .remove = msm_hw_fence_remove, + .driver = { + .name = "msm-hw-fence", + .of_match_table = of_match_ptr(msm_hw_fence_dt_match), + }, +}; + +static int __init msm_hw_fence_init(void) +{ + int rc = 0; + + HWFNC_DBG_H("+\n"); + + rc = platform_driver_register(&msm_hw_fence_driver); + if (rc) { + HWFNC_ERR("%s: failed to register platform driver\n", + __func__); + return rc; + } + + HWFNC_DBG_H("-\n"); + + return 0; +} + +static void __exit msm_hw_fence_exit(void) +{ + HWFNC_DBG_H("+\n"); + + platform_driver_unregister(&msm_hw_fence_driver); + + HWFNC_DBG_H("-\n"); +} + +module_init(msm_hw_fence_init); +module_exit(msm_hw_fence_exit); + +MODULE_DESCRIPTION("QTI HW Fence Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/mm_driver_board.mk b/mm_driver_board.mk index 0563c64f97..72954fa842 100644 --- a/mm_driver_board.mk +++ b/mm_driver_board.mk @@ -2,9 +2,12 @@ ifneq ($(TARGET_BOARD_AUTO),true) ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true) - BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko - BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko - BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/msm_ext_display.ko + BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko \ + $(KERNEL_MODULES_OUT)/msm_hw_fence.ko + BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko \ + $(KERNEL_MODULES_OUT)/msm_hw_fence.ko + BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/msm_ext_display.ko \ + $(KERNEL_MODULES_OUT)/msm_hw_fence.ko ifneq ($(TARGET_BOARD_PLATFORM), taro) BOARD_VENDOR_KERNEL_MODULES += 
$(KERNEL_MODULES_OUT)/sync_fence.ko diff --git a/mm_driver_product.mk b/mm_driver_product.mk index 4d74d27bf4..1f352c0b8a 100644 --- a/mm_driver_product.mk +++ b/mm_driver_product.mk @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only -PRODUCT_PACKAGES += msm_ext_display.ko +PRODUCT_PACKAGES += msm_ext_display.ko msm_hw_fence.ko ifneq ($(TARGET_BOARD_PLATFORM), taro) PRODUCT_PACKAGES += sync_fence.ko From efe7847b7a8d433c2b2bf40f4d2209566de37dd1 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Mon, 25 Apr 2022 12:07:40 -0700 Subject: [PATCH 011/166] mm-drivers: hw_fence: avoid compiling hw_fence driver for taro hw_fence driver is not required for taro variants. Since the Display SI 3.0 is shared with taro dev SI variant, avoid compiling hw_fence as dlkm for taro target. Change-Id: I84637f2546fd0818d956880fbc1bb86a30a7c916 Signed-off-by: Ingrid Gallardo --- Android.mk | 2 +- mm_driver_board.mk | 18 +++++++++--------- mm_driver_product.mk | 4 ++-- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Android.mk b/Android.mk index c703795324..c14968715d 100644 --- a/Android.mk +++ b/Android.mk @@ -1,7 +1,7 @@ MM_DRIVER_PATH := $(call my-dir) include $(MM_DRIVER_PATH)/msm_ext_display/Android.mk -include $(MM_DRIVER_PATH)/hw_fence/Android.mk ifneq ($(TARGET_BOARD_PLATFORM), taro) +include $(MM_DRIVER_PATH)/hw_fence/Android.mk include $(MM_DRIVER_PATH)/sync_fence/Android.mk endif diff --git a/mm_driver_board.mk b/mm_driver_board.mk index 72954fa842..127c8dcc31 100644 --- a/mm_driver_board.mk +++ b/mm_driver_board.mk @@ -2,17 +2,17 @@ ifneq ($(TARGET_BOARD_AUTO),true) ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true) - BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko \ - $(KERNEL_MODULES_OUT)/msm_hw_fence.ko - BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko \ - $(KERNEL_MODULES_OUT)/msm_hw_fence.ko - BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += 
$(KERNEL_MODULES_OUT)/msm_ext_display.ko \ - $(KERNEL_MODULES_OUT)/msm_hw_fence.ko + BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko + BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko + BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/msm_ext_display.ko ifneq ($(TARGET_BOARD_PLATFORM), taro) - BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko - BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko - BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/sync_fence.ko + BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko \ + $(KERNEL_MODULES_OUT)/msm_hw_fence.ko + BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko \ + $(KERNEL_MODULES_OUT)/msm_hw_fence.ko + BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/sync_fence.ko \ + $(KERNEL_MODULES_OUT)/msm_hw_fence.ko endif endif endif diff --git a/mm_driver_product.mk b/mm_driver_product.mk index 1f352c0b8a..c7d11b3449 100644 --- a/mm_driver_product.mk +++ b/mm_driver_product.mk @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only -PRODUCT_PACKAGES += msm_ext_display.ko msm_hw_fence.ko +PRODUCT_PACKAGES += msm_ext_display.ko ifneq ($(TARGET_BOARD_PLATFORM), taro) -PRODUCT_PACKAGES += sync_fence.ko +PRODUCT_PACKAGES += sync_fence.ko msm_hw_fence.ko endif From 47157b13975f4cca2a4c61e72d504bef1be1ccd3 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Fri, 27 May 2022 15:05:45 -0700 Subject: [PATCH 012/166] mm-drivers: hw_fence: populate payload size in hfi header Populate the payload size for the ctrl, rx and tx queues that communicate with the fence controller. 
Change-Id: Idc7dafcccd6ea16821e4f595bdab7395a5e0745b Signed-off-by: Ingrid Gallardo --- hw_fence/src/hw_fence_drv_priv.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index edecc41cbc..cdfe9a81c2 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -31,7 +31,7 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, void *ptr, *qptr; phys_addr_t phys, qphys; u32 size, start_queue_offset; - int headers_size, queue_size; + int headers_size, queue_size, payload_size; int i, ret = 0; HWFNC_DBG_INIT("mem_reserve_id:%d client_id:%d\n", mem_reserve_id, client_id); @@ -39,10 +39,12 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, case HW_FENCE_MEM_RESERVE_CTRL_QUEUE: headers_size = HW_FENCE_HFI_CTRL_HEADERS_SIZE; queue_size = drv_data->hw_fence_ctrl_queue_size; + payload_size = HW_FENCE_CTRL_QUEUE_PAYLOAD; break; case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: headers_size = HW_FENCE_HFI_CLIENT_HEADERS_SIZE; queue_size = drv_data->hw_fence_client_queue_size; + payload_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD; break; default: HWFNC_ERR("Unexpected mem reserve id: %d\n", mem_reserve_id); @@ -102,6 +104,9 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, /* Set the size of this header */ hfi_queue_header->queue_size = queue_size; + /* Set the payload size */ + hfi_queue_header->pkt_size = payload_size; + /* Store Memory info in the Client data */ queues[i].va_queue = qptr; queues[i].pa_queue = qphys; From 2ae3dcadde4b7acc83e4734d5c7f52ddc0114987 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Fri, 3 Jun 2022 11:00:48 -0700 Subject: [PATCH 013/166] mm-drivers: hw_fence: avoid hw fences creation until fctl ready This change adds a check to avoid hw-fences creation until the fence controller is ready. 
Change-Id: I613c19d9dfd8836f8ded6bcb0162bef647df7bc3 Signed-off-by: Ingrid Gallardo --- hw_fence/include/hw_fence_drv_priv.h | 3 +++ hw_fence/src/hw_fence_drv_utils.c | 2 ++ hw_fence/src/msm_hw_fence.c | 28 ++++++++++++++++++++++++---- 3 files changed, 29 insertions(+), 4 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index e15fd4159c..c5565a63ff 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -211,6 +211,7 @@ struct msm_hw_fence_dbg_data { * @client_id_mask: bitmask for tracking registered client_ids * @clients_mask_lock: lock to synchronize access to the clients mask * @msm_hw_fence_client: table with the handles of the registered clients + * @vm_ready: flag to indicate if vm has been initialized * @ipcc_dpu_initialized: flag to indicate if dpu hw is initialized */ struct hw_fence_driver_data { @@ -286,6 +287,8 @@ struct hw_fence_driver_data { /* table with registered client handles */ struct msm_hw_fence_client *clients[HW_FENCE_CLIENT_MAX]; + + bool vm_ready; #ifdef HW_DPU_IPCC /* state variables */ bool ipcc_dpu_initialized; diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 226df1e0cb..bd02ada6a2 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -306,6 +306,8 @@ static int hw_fence_rm_cb(struct notifier_block *nb, unsigned long cmd, void *da HWFNC_DBG_INIT("init mem\n"); if (hw_fence_gunyah_share_mem(drv_data, self_vmid, peer_vmid)) HWFNC_ERR("failed to share memory\n"); + else + drv_data->vm_ready = true; break; case GH_RM_VM_STATUS_RESET: HWFNC_DBG_INIT("reset\n"); diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index e2e61947c4..0c8fd65d60 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -142,6 +142,12 @@ int msm_hw_fence_create(void *client_handle, HWFNC_ERR("Invalid input\n"); return -EINVAL; } + + if 
(!hw_fence_drv_data->vm_ready) { + HWFNC_DBG_H("VM not ready, cannot create fence\n"); + return -EAGAIN; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; fence = (struct dma_fence *)params->fence; @@ -233,6 +239,12 @@ int msm_hw_fence_wait_update(void *client_handle, HWFNC_ERR("Invalid data\n"); return -EINVAL; } + + if (!hw_fence_drv_data->vm_ready) { + HWFNC_DBG_H("VM not ready, cannot destroy fence\n"); + return -EAGAIN; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; HWFNC_DBG_H("+\n"); @@ -276,6 +288,12 @@ int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags) HWFNC_ERR("Invalid client handle!\n"); return -EINVAL; } + + if (!hw_fence_drv_data->vm_ready) { + HWFNC_DBG_H("VM not ready, cannot reset client\n"); + return -EAGAIN; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; hw_fences_tbl = hw_fence_drv_data->hw_fences_tbl; @@ -292,8 +310,9 @@ int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 erro { struct msm_hw_fence_client *hw_fence_client; - if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { - HWFNC_ERR("hw fence driver not ready\n"); + if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready || + !hw_fence_drv_data->vm_ready) { + HWFNC_ERR("hw fence driver or vm not ready\n"); return -EAGAIN; } else if (IS_ERR_OR_NULL(client_handle) || (handle >= hw_fence_drv_data->hw_fences_tbl_cnt)) { @@ -319,8 +338,9 @@ int msm_hw_fence_trigger_signal(void *client_handle, { struct msm_hw_fence_client *hw_fence_client; - if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { - HWFNC_ERR("hw fence driver not ready\n"); + if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready + || !hw_fence_drv_data->vm_ready) { + HWFNC_ERR("hw fence driver or vm not ready\n"); return -EAGAIN; } else if (IS_ERR_OR_NULL(client_handle)) { HWFNC_ERR("Invalid client\n"); From 
91cfcb8220c61b62f81a35b9c7c00fd1f39b0598 Mon Sep 17 00:00:00 2001 From: Prabhanjan Kandula Date: Fri, 20 May 2022 11:20:43 -0700 Subject: [PATCH 014/166] mm-drivers: add support for compiling out mm driver modules This change provides required support to disable mm driver modules compilation along with all modules and supports specific flag for override to enable compilation if required. Change-Id: I3ea1383855a6be49ed12a23a3585e9d6ebb1810a Signed-off-by: Prabhanjan Kandula (cherry picked from commit 91a337989da77e6071fdfbd03b43c423356c14c0) --- Android.mk | 17 +++++++++++++---- mm_driver_board.mk | 28 ++++++++++++++++++---------- mm_driver_product.mk | 14 +++++++++++--- 3 files changed, 42 insertions(+), 17 deletions(-) diff --git a/Android.mk b/Android.mk index c14968715d..86e3104278 100644 --- a/Android.mk +++ b/Android.mk @@ -1,7 +1,16 @@ MM_DRIVER_PATH := $(call my-dir) -include $(MM_DRIVER_PATH)/msm_ext_display/Android.mk -ifneq ($(TARGET_BOARD_PLATFORM), taro) -include $(MM_DRIVER_PATH)/hw_fence/Android.mk -include $(MM_DRIVER_PATH)/sync_fence/Android.mk + +MM_DRV_DLKM_ENABLE := true +ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true) + ifeq ($(TARGET_KERNEL_DLKM_MM_DRV_OVERRIDE), false) + MM_DRV_DLKM_ENABLE := false + endif endif +ifeq ($(MM_DRV_DLKM_ENABLE), true) + include $(MM_DRIVER_PATH)/msm_ext_display/Android.mk + ifneq ($(TARGET_BOARD_PLATFORM), taro) + include $(MM_DRIVER_PATH)/hw_fence/Android.mk + include $(MM_DRIVER_PATH)/sync_fence/Android.mk + endif +endif diff --git a/mm_driver_board.mk b/mm_driver_board.mk index 127c8dcc31..7e18d8bc4e 100644 --- a/mm_driver_board.mk +++ b/mm_driver_board.mk @@ -1,18 +1,26 @@ #SPDX-License-Identifier: GPL-2.0-only -ifneq ($(TARGET_BOARD_AUTO),true) - ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true) - BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko - BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko - 
BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/msm_ext_display.ko +MM_DRV_DLKM_ENABLE := true +ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true) + ifeq ($(TARGET_KERNEL_DLKM_MM_DRV_OVERRIDE), false) + MM_DRV_DLKM_ENABLE := false + endif +endif - ifneq ($(TARGET_BOARD_PLATFORM), taro) - BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko \ +ifeq ($(MM_DRV_DLKM_ENABLE), true) + ifneq ($(TARGET_BOARD_AUTO),true) + ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true) + BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko + BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko + BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/msm_ext_display.ko + ifneq ($(TARGET_BOARD_PLATFORM), taro) + BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko \ $(KERNEL_MODULES_OUT)/msm_hw_fence.ko - BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko \ - $(KERNEL_MODULES_OUT)/msm_hw_fence.ko - BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/sync_fence.ko \ + BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko \ + $(KERNEL_MODULES_OUT)/msm_hw_fence.ko + BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/sync_fence.ko \ $(KERNEL_MODULES_OUT)/msm_hw_fence.ko + endif endif endif endif diff --git a/mm_driver_product.mk b/mm_driver_product.mk index c7d11b3449..4c2a5d2fe9 100644 --- a/mm_driver_product.mk +++ b/mm_driver_product.mk @@ -1,7 +1,15 @@ -# SPDX-License-Identifier: GPL-2.0-only PRODUCT_PACKAGES += msm_ext_display.ko -ifneq ($(TARGET_BOARD_PLATFORM), taro) -PRODUCT_PACKAGES += sync_fence.ko msm_hw_fence.ko +MM_DRV_DLKM_ENABLE := true +ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true) + ifeq ($(TARGET_KERNEL_DLKM_MM_DRV_OVERRIDE), false) + MM_DRV_DLKM_ENABLE := false + endif +endif + +ifeq ($(MM_DRV_DLKM_ENABLE), true) + ifneq ($(TARGET_BOARD_PLATFORM), taro) + 
PRODUCT_PACKAGES += sync_fence.ko msm_hw_fence.ko + endif endif From f73a4b179a18fef496c5ab0203187eb15c2a4c2a Mon Sep 17 00:00:00 2001 From: Shirisha Kollapuram Date: Thu, 26 May 2022 14:58:49 +0530 Subject: [PATCH 015/166] mm-drivers: hw-fence: add hardware fence driver validation ioctls This change adds support to validate the hw_fence driver by adding IOCTLs that expose the hw_fence interfaces so that validation clients can register/unregister, create/destroy and wait/signal fences. IOCTL's will be available for debug purpose only when the debugfs config is set. Change-Id: Idb0d04ee245718e9b19ccd12ac760829831426b0 Signed-off-by: Shirisha Kollapuram --- hw_fence/Kbuild | 2 + hw_fence/include/hw_fence_drv_debug.h | 133 +++++ hw_fence/include/hw_fence_drv_priv.h | 22 + hw_fence/src/hw_fence_drv_debug.c | 149 ++---- hw_fence/src/hw_fence_drv_ipc.c | 18 + hw_fence/src/hw_fence_drv_priv.c | 11 + hw_fence/src/hw_fence_drv_utils.c | 11 + hw_fence/src/hw_fence_ioctl.c | 711 ++++++++++++++++++++++++++ hw_fence/src/msm_hw_fence.c | 4 + 9 files changed, 947 insertions(+), 114 deletions(-) create mode 100644 hw_fence/src/hw_fence_ioctl.c diff --git a/hw_fence/Kbuild b/hw_fence/Kbuild index fcd6b6e7bb..8948d581e9 100644 --- a/hw_fence/Kbuild +++ b/hw_fence/Kbuild @@ -14,5 +14,7 @@ msm_hw_fence-y := src/msm_hw_fence.o \ src/hw_fence_drv_debug.o \ src/hw_fence_drv_ipc.o +msm_hw_fence-$(CONFIG_DEBUG_FS) += src/hw_fence_ioctl.o + CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" endif diff --git a/hw_fence/include/hw_fence_drv_debug.h b/hw_fence/include/hw_fence_drv_debug.h index d980331113..de0e6e7a37 100644 --- a/hw_fence/include/hw_fence_drv_debug.h +++ b/hw_fence/include/hw_fence_drv_debug.h @@ -6,6 +6,10 @@ #ifndef __HW_FENCE_DRV_DEBUG #define __HW_FENCE_DRV_DEBUG +#include "hw_fence_drv_ipc.h" + +#define HW_FENCE_NAME_SIZE 64 + enum hw_fence_drv_prio { HW_FENCE_HIGH = 0x000001, /* High density debug messages (noisy) */ HW_FENCE_LOW = 0x000002, /* Low 
density debug messages */ @@ -58,4 +62,133 @@ extern u32 msm_hw_fence_debug_level; int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data); +#if IS_ENABLED(CONFIG_DEBUG_FS) + +int process_validation_client_loopback(struct hw_fence_driver_data *drv_data, int client_id); + +extern const struct file_operations hw_sync_debugfs_fops; + +struct hw_fence_out_clients_map { + int ipc_client_id; /* ipc client id for the hw fence client */ + int ipc_signal_id; /* ipc signal id for the hw fence client */ +}; + +/* These signals are the ones that the actual clients should be triggering, hw-fence driver + * does not need to have knowledge of these signals. Adding them here for debugging purposes. + * Only fence controller and the clients know these id's, since these + * are to trigger the ipcc from the 'client hw-core' to the 'hw-fence controller' + * The index of this struct must match the enum hw_fence_client_id + */ +static const struct hw_fence_out_clients_map + dbg_out_clients_signal_map_no_dpu[HW_FENCE_CLIENT_MAX] = { + {HW_FENCE_IPC_CLIENT_ID_APPS, 0}, /* CTRL_LOOPBACK */ + {HW_FENCE_IPC_CLIENT_ID_GPU, 0}, /* CTX0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 2}, /* CTL0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 4}, /* CTL1 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 6}, /* CTL2 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 8}, /* CTL3 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 10}, /* CTL4 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 12}, /* CTL5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 21}, /* VAL0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 22}, /* VAL1 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 23}, /* VAL2 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 24}, /* VAL3 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 25}, /* VAL4 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 26}, /* VAL5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 27}, /* VAL6 */ +}; + +/** + * struct hw_dma_fence - fences created by hw-fence for debugging. + * @base: base dma-fence structure, this must remain at beginning of the struct. + * @name: name of each fence. 
+ * @client_handle: handle for the client owner of this fence, this is returned by the hw-fence + * driver after a successful registration of the client and used by this fence + * during release. + */ +struct hw_dma_fence { + struct dma_fence base; + char name[HW_FENCE_NAME_SIZE]; + void *client_handle; +}; + +static inline struct hw_dma_fence *to_hw_dma_fence(struct dma_fence *fence) +{ + return container_of(fence, struct hw_dma_fence, base); +} + +static inline void _cleanup_fences(int i, struct dma_fence **fences, spinlock_t **fences_lock) +{ + struct hw_dma_fence *dma_fence; + int fence_idx; + + for (fence_idx = i; fence_idx >= 0 ; fence_idx--) { + kfree(fences_lock[fence_idx]); + + dma_fence = to_hw_dma_fence(fences[fence_idx]); + kfree(dma_fence); + } + + kfree(fences_lock); + kfree(fences); +} + +static const char *hw_fence_dbg_get_driver_name(struct dma_fence *fence) +{ + struct hw_dma_fence *hw_dma_fence = to_hw_dma_fence(fence); + + return hw_dma_fence->name; +} + +static const char *hw_fence_dbg_get_timeline_name(struct dma_fence *fence) +{ + struct hw_dma_fence *hw_dma_fence = to_hw_dma_fence(fence); + + return hw_dma_fence->name; +} + +static bool hw_fence_dbg_enable_signaling(struct dma_fence *fence) +{ + return true; +} + +static void _hw_fence_release(struct hw_dma_fence *hw_dma_fence) +{ + if (IS_ERR_OR_NULL(hw_dma_fence->client_handle)) { + HWFNC_ERR("invalid hwfence data, won't release hw_fence!\n"); + return; + } + + /* release hw-fence */ + if (msm_hw_fence_destroy(hw_dma_fence->client_handle, &hw_dma_fence->base)) + HWFNC_ERR("failed to release hw_fence!\n"); +} + +static void hw_fence_dbg_release(struct dma_fence *fence) +{ + struct hw_dma_fence *hw_dma_fence; + + if (!fence) + return; + + HWFNC_DBG_H("release backing fence %pK\n", fence); + hw_dma_fence = to_hw_dma_fence(fence); + + if (test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) + _hw_fence_release(hw_dma_fence); + + kfree(fence->lock); + kfree(hw_dma_fence); +} + +static struct 
dma_fence_ops hw_fence_dbg_ops = { + .get_driver_name = hw_fence_dbg_get_driver_name, + .get_timeline_name = hw_fence_dbg_get_timeline_name, + .enable_signaling = hw_fence_dbg_enable_signaling, + .wait = dma_fence_default_wait, + .release = hw_fence_dbg_release, +}; + +#endif /* CONFIG_DEBUG_FS */ + #endif /* __HW_FENCE_DRV_DEBUG */ diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index e15fd4159c..5f7b97e33b 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -85,6 +85,13 @@ enum hw_fence_lookup_ops { * HW_FENCE_LOOPBACK_DPU_CTL_4: dpu client 4. Used in platforms with no dpu-ipc. * HW_FENCE_LOOPBACK_DPU_CTL_5: dpu client 5. Used in platforms with no dpu-ipc. * HW_FENCE_LOOPBACK_DPU_CTX_0: gfx client 0. Used in platforms with no gmu support. + * HW_FENCE_LOOPBACK_VAL_0: debug validation client 0. + * HW_FENCE_LOOPBACK_VAL_1: debug validation client 1. + * HW_FENCE_LOOPBACK_VAL_2: debug validation client 2. + * HW_FENCE_LOOPBACK_VAL_3: debug validation client 3. + * HW_FENCE_LOOPBACK_VAL_4: debug validation client 4. + * HW_FENCE_LOOPBACK_VAL_5: debug validation client 5. + * HW_FENCE_LOOPBACK_VAL_6: debug validation client 6. 
*/ enum hw_fence_loopback_id { HW_FENCE_LOOPBACK_DPU_CTL_0, @@ -94,6 +101,15 @@ enum hw_fence_loopback_id { HW_FENCE_LOOPBACK_DPU_CTL_4, HW_FENCE_LOOPBACK_DPU_CTL_5, HW_FENCE_LOOPBACK_GFX_CTX_0, +#if IS_ENABLED(CONFIG_DEBUG_FS) + HW_FENCE_LOOPBACK_VAL_0, + HW_FENCE_LOOPBACK_VAL_1, + HW_FENCE_LOOPBACK_VAL_2, + HW_FENCE_LOOPBACK_VAL_3, + HW_FENCE_LOOPBACK_VAL_4, + HW_FENCE_LOOPBACK_VAL_5, + HW_FENCE_LOOPBACK_VAL_6, +#endif /* CONFIG_DEBUG_FS */ HW_FENCE_LOOPBACK_MAX, }; @@ -121,6 +137,8 @@ struct msm_hw_fence_queue { * @ipc_signal_id: id of the signal to be triggered for this client * @ipc_client_id: id of the ipc client for this hw fence driver client * @update_rxq: bool to indicate if client uses rx-queue + * @wait_queue: wait queue for the validation clients + * @val_signal: doorbell flag to signal the validation clients in the wait queue */ struct msm_hw_fence_client { enum hw_fence_client_id client_id; @@ -129,6 +147,10 @@ struct msm_hw_fence_client { int ipc_signal_id; int ipc_client_id; bool update_rxq; +#if IS_ENABLED(CONFIG_DEBUG_FS) + wait_queue_head_t wait_queue; + atomic_t val_signal; +#endif /* CONFIG_DEBUG_FS */ }; /** diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index c047a3b251..f872c4c197 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -12,7 +12,6 @@ #include "hw_fence_drv_ipc.h" #include "hw_fence_drv_utils.h" -#define HW_FENCE_NAME_SIZE 64 #define HW_FENCE_DEBUG_MAX_LOOPS 200 u32 msm_hw_fence_debug_level = HW_FENCE_PRINTK; @@ -38,20 +37,6 @@ struct client_data { struct list_head list; }; -/** - * struct hw_dma_fence - fences created by hw-fence for debugging. - * @base: base dma-fence structure, this must remain at beginning of the struct. - * @name: name of each fence. - * @client_handle: handle for the client owner of this fence, this is returned by the hw-fence - * driver after a successful registration of the client and used by this fence - * during release. 
- */ -struct hw_dma_fence { - struct dma_fence base; - char name[HW_FENCE_NAME_SIZE]; - void *client_handle; -}; - #if IS_ENABLED(CONFIG_DEBUG_FS) static int _get_debugfs_input_client(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos, @@ -155,67 +140,6 @@ static const struct file_operations hw_fence_dbg_ipcc_fops = { .write = hw_fence_dbg_ipcc_write, }; -static inline struct hw_dma_fence *to_hw_dma_fence(struct dma_fence *fence) -{ - return container_of(fence, struct hw_dma_fence, base); -} - -static const char *hw_fence_dbg_get_driver_name(struct dma_fence *fence) -{ - struct hw_dma_fence *hw_dma_fence = to_hw_dma_fence(fence); - - return hw_dma_fence->name; -} - -static const char *hw_fence_dbg_get_timeline_name(struct dma_fence *fence) -{ - struct hw_dma_fence *hw_dma_fence = to_hw_dma_fence(fence); - - return hw_dma_fence->name; -} - -static bool hw_fence_dbg_enable_signaling(struct dma_fence *fence) -{ - return true; -} - -static void _hw_fence_release(struct hw_dma_fence *hw_dma_fence) -{ - if (IS_ERR_OR_NULL(hw_dma_fence->client_handle)) { - HWFNC_ERR("invalid hwfence data, won't release hw_fence\n"); - return; - } - - /* release hw-fence */ - if (msm_hw_fence_destroy(hw_dma_fence->client_handle, &hw_dma_fence->base)) - HWFNC_ERR("failed to release hw_fence\n"); -} - -static void hw_fence_dbg_release(struct dma_fence *fence) -{ - struct hw_dma_fence *hw_dma_fence; - - if (!fence) - return; - - HWFNC_DBG_H("release backing fence %pK\n", fence); - hw_dma_fence = to_hw_dma_fence(fence); - - if (test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) - _hw_fence_release(hw_dma_fence); - - kfree(fence->lock); - kfree(hw_dma_fence); -} - -static struct dma_fence_ops hw_fence_dbg_ops = { - .get_driver_name = hw_fence_dbg_get_driver_name, - .get_timeline_name = hw_fence_dbg_get_timeline_name, - .enable_signaling = hw_fence_dbg_enable_signaling, - .wait = dma_fence_default_wait, - .release = hw_fence_dbg_release, -}; - struct client_data 
*_get_client_node(struct hw_fence_driver_data *drv_data, u32 client_id) { struct client_data *node = NULL; @@ -321,29 +245,6 @@ static ssize_t hw_fence_dbg_register_clients_wr(struct file *file, return count; } -struct hw_fence_out_clients_map { - int ipc_client_id; /* ipc client id for the hw fence client */ - int ipc_signal_id; /* ipc signal id for the hw fence client */ -}; - -/* NOTE: These signals are the ones that the actual clients should be triggering, hw-fence driver - * does not need to have knowledge of these signals. Adding them here for debugging purposes. - * Only fence controller and the cliens know these id's, since these - * are to trigger the ipcc from the 'client hw-core' to the 'hw-fence controller' - * - * Note that the index of this struct must match the enum hw_fence_client_id - */ -struct hw_fence_out_clients_map dbg_out_clients_signal_map_no_dpu[HW_FENCE_CLIENT_MAX] = { - {HW_FENCE_IPC_CLIENT_ID_APPS, 0}, /* CTRL_LOOPBACK */ - {HW_FENCE_IPC_CLIENT_ID_GPU, 0}, /* CTX0 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 2}, /* CTL0 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 4}, /* CTL1 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 6}, /* CTL2 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 8}, /* CTL3 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 10}, /* CTL4 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 12} /* CTL5 */ -}; - /** * hw_fence_dbg_tx_and_signal_clients_wr() - debugfs write to simulate the lifecycle of a hw-fence. * @file: file handler. @@ -761,21 +662,7 @@ static ssize_t hw_fence_dbg_dump_table_wr(struct file *file, return user_buf_size; } -static void _cleanup_fences(int i, struct dma_fence **fences, spinlock_t **fences_lock) -{ - struct hw_dma_fence *dma_fence; - int idx; - for (idx = i; idx >= 0 ; idx--) { - kfree(fences_lock[idx]); - - dma_fence = to_hw_dma_fence(fences[idx]); - kfree(dma_fence); - } - - kfree(fences_lock); - kfree(fences); -} /** * hw_fence_dbg_create_join_fence() - debugfs write to simulate the lifecycle of a join hw-fence. 
@@ -840,7 +727,7 @@ static ssize_t hw_fence_dbg_create_join_fence(struct file *file, for (i = 0; i < num_fences; i++) { struct hw_dma_fence *dma_fence; - fences_lock[i] = kzalloc(sizeof(*fences_lock), GFP_KERNEL); + fences_lock[i] = kzalloc(sizeof(spinlock_t), GFP_KERNEL); if (!fences_lock[i]) { _cleanup_fences(i, fences, fences_lock); return -ENOMEM; @@ -916,6 +803,39 @@ error: return count; } +int process_validation_client_loopback(struct hw_fence_driver_data *drv_data, + int client_id) +{ + struct msm_hw_fence_client *hw_fence_client; + + if (client_id < HW_FENCE_LOOPBACK_VAL_0 || client_id > HW_FENCE_LOOPBACK_VAL_6) { + HWFNC_ERR("invalid client_id: %d min: %d max: %d\n", client_id, + HW_FENCE_LOOPBACK_VAL_0, HW_FENCE_LOOPBACK_VAL_6); + return -EINVAL; + } + + mutex_lock(&drv_data->clients_mask_lock); + + if (!drv_data->clients[client_id]) { + mutex_unlock(&drv_data->clients_mask_lock); + return -EINVAL; + } + + hw_fence_client = drv_data->clients[client_id]; + + HWFNC_DBG_IRQ("Processing validation client workaround client_id:%d\n", client_id); + + /* set the atomic flag, to signal the client wait */ + atomic_set(&hw_fence_client->val_signal, 1); + + /* wake-up waiting client */ + wake_up_all(&hw_fence_client->wait_queue); + + mutex_unlock(&drv_data->clients_mask_lock); + + return 0; +} + static const struct file_operations hw_fence_reset_client_fops = { .open = simple_open, .write = hw_fence_dbg_reset_client_wr, @@ -988,6 +908,7 @@ int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data) debugfs_create_u32("hw_fence_debug_level", 0600, debugfs_root, &msm_hw_fence_debug_level); debugfs_create_file("hw_fence_dump_table", 0600, debugfs_root, drv_data, &hw_fence_dump_table_fops); + debugfs_create_file("hw_sync", 0600, debugfs_root, NULL, &hw_sync_debugfs_fops); return 0; } diff --git a/hw_fence/src/hw_fence_drv_ipc.c b/hw_fence/src/hw_fence_drv_ipc.c index 7879d4f788..c3414a20da 100644 --- a/hw_fence/src/hw_fence_drv_ipc.c +++ 
b/hw_fence/src/hw_fence_drv_ipc.c @@ -40,6 +40,15 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_no_dpu[HW_FENCE_CLIENT_M {HW_FENCE_IPC_CLIENT_ID_APPS, 17, false}, /* ctl3 */ {HW_FENCE_IPC_CLIENT_ID_APPS, 18, false}, /* ctl4 */ {HW_FENCE_IPC_CLIENT_ID_APPS, 19, false}, /* ctl5 */ +#if IS_ENABLED(CONFIG_DEBUG_FS) + {HW_FENCE_IPC_CLIENT_ID_APPS, 21, true}, /* val0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 22, true}, /* val1 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 23, true}, /* val2 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 24, true}, /* val3 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 25, true}, /* val4 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 26, true}, /* val5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 27, true}, /* val6 */ +#endif /* CONFIG_DEBUG_FS */ }; /** @@ -59,6 +68,15 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_CLIENT_MAX] = { {HW_FENCE_IPC_CLIENT_ID_DPU, 3, false}, /* ctl3 */ {HW_FENCE_IPC_CLIENT_ID_DPU, 4, false}, /* ctl4 */ {HW_FENCE_IPC_CLIENT_ID_DPU, 5, false}, /* ctl5 */ +#if IS_ENABLED(CONFIG_DEBUG_FS) + {HW_FENCE_IPC_CLIENT_ID_APPS, 21, true}, /* val0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 22, true}, /* val1 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 23, true}, /* val2 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 24, true}, /* val3 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 25, true}, /* val4 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 26, true}, /* val5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 27, true}, /* val6 */ +#endif /* CONFIG_DEBUG_FS */ }; int hw_fence_ipcc_get_client_id(struct hw_fence_driver_data *drv_data, u32 client_id) diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index edecc41cbc..e6aa770468 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -539,6 +539,17 @@ int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, case HW_FENCE_CLIENT_ID_CTX0: /* nothing to initialize for gpu client */ break; +#if IS_ENABLED(CONFIG_DEBUG_FS) + case HW_FENCE_CLIENT_ID_VAL0: + case HW_FENCE_CLIENT_ID_VAL1: + case 
HW_FENCE_CLIENT_ID_VAL2: + case HW_FENCE_CLIENT_ID_VAL3: + case HW_FENCE_CLIENT_ID_VAL4: + case HW_FENCE_CLIENT_ID_VAL5: + case HW_FENCE_CLIENT_ID_VAL6: + /* nothing to initialize for validation clients */ + break; +#endif /* CONFIG_DEBUG_FS */ case HW_FENCE_CLIENT_ID_CTL0: case HW_FENCE_CLIENT_ID_CTL1: case HW_FENCE_CLIENT_ID_CTL2: diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 226df1e0cb..73d2c83273 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -134,6 +134,17 @@ static int _process_doorbell_client(struct hw_fence_driver_data *drv_data, int c case HW_FENCE_LOOPBACK_GFX_CTX_0: ret = _process_gfx_client_loopback(drv_data, client_id); break; +#if IS_ENABLED(CONFIG_DEBUG_FS) + case HW_FENCE_LOOPBACK_VAL_0: + case HW_FENCE_LOOPBACK_VAL_1: + case HW_FENCE_LOOPBACK_VAL_2: + case HW_FENCE_LOOPBACK_VAL_3: + case HW_FENCE_LOOPBACK_VAL_4: + case HW_FENCE_LOOPBACK_VAL_5: + case HW_FENCE_LOOPBACK_VAL_6: + ret = process_validation_client_loopback(drv_data, client_id); + break; +#endif /* CONFIG_DEBUG_FS */ default: HWFNC_ERR("unknown client:%d\n", client_id); ret = -EINVAL; diff --git a/hw_fence/src/hw_fence_ioctl.c b/hw_fence/src/hw_fence_ioctl.c new file mode 100644 index 0000000000..8ff2bdfb02 --- /dev/null +++ b/hw_fence/src/hw_fence_ioctl.c @@ -0,0 +1,711 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include + +#include "hw_fence_drv_priv.h" +#include "hw_fence_drv_utils.h" +#include "hw_fence_drv_ipc.h" +#include "hw_fence_drv_debug.h" + +#define HW_SYNC_IOCTL_COUNT ARRAY_SIZE(hw_sync_debugfs_ioctls) +#define HW_FENCE_ARRAY_SIZE 10 +#define HW_SYNC_IOC_MAGIC 'W' +#define HW_SYNC_IOC_REG_CLIENT _IOWR(HW_SYNC_IOC_MAGIC, 10, unsigned long) +#define HW_SYNC_IOC_UNREG_CLIENT _IOWR(HW_SYNC_IOC_MAGIC, 11, unsigned long) +#define HW_SYNC_IOC_CREATE_FENCE _IOWR(HW_SYNC_IOC_MAGIC, 12,\ + struct hw_fence_sync_create_data) +#define HW_SYNC_IOC_DESTROY_FENCE _IOWR(HW_SYNC_IOC_MAGIC, 13,\ + struct hw_fence_sync_create_data) +#define HW_SYNC_IOC_CREATE_FENCE_ARRAY _IOWR(HW_SYNC_IOC_MAGIC, 14,\ + struct hw_fence_array_sync_create_data) +#define HW_SYNC_IOC_DESTROY_FENCE_ARRAY _IOWR(HW_SYNC_IOC_MAGIC, 15,\ + struct hw_fence_array_sync_create_data) +#define HW_SYNC_IOC_REG_FOR_WAIT _IOWR(HW_SYNC_IOC_MAGIC, 16, int) +#define HW_SYNC_IOC_FENCE_SIGNAL _IOWR(HW_SYNC_IOC_MAGIC, 17, unsigned long) +#define HW_SYNC_IOC_FENCE_WAIT _IOWR(HW_SYNC_IOC_MAGIC, 18, int) +#define HW_SYNC_IOC_RESET_CLIENT _IOWR(HW_SYNC_IOC_MAGIC, 19, unsigned long) +#define HW_FENCE_IOCTL_NR(n) (_IOC_NR(n) - 2) +#define HW_IOCTL_DEF(ioctl, _func) \ + [HW_FENCE_IOCTL_NR(ioctl)] = { \ + .cmd = ioctl, \ + .func = _func, \ + .name = #ioctl \ + } + +/** + * struct hw_sync_obj - per client hw sync object. + * @context: context id used to create fences. + * @client_id: to uniquely represent client. + * @client_handle: Pointer to the structure holding the resources + * allocated to the client. + * @mem_descriptor: Memory descriptor of the queue allocated by the + * hardware fence driver for each client during register. + */ +struct hw_sync_obj { + u64 context; + int client_id; + void *client_handle; + struct msm_hw_fence_mem_addr mem_descriptor; +}; + +/** + * struct hw_fence_sync_create_data - data used in creating fences. + * @seqno: sequence number. 
+ * @incr_context: if set, then the context would be incremented. + * @fence: returns the fd of the new sync_file with the created fence. + * @hash: fence hash + */ +struct hw_fence_sync_create_data { + u64 seqno; + bool incr_context; + __s32 fence; + u64 hash; +}; + +/** + * struct hw_fence_array_sync_create_data - data used in creating multiple fences. + * @seqno: array of sequence numbers used to create fences. + * @num_fences: number of fences to be created. + * @fence: return the fd of the new sync_file with the created fence. + * @hash: array of fence hash + */ +struct hw_fence_array_sync_create_data { + u64 seqno[HW_FENCE_ARRAY_SIZE]; + int num_fences; + __s32 fence; + u64 hash[HW_FENCE_ARRAY_SIZE]; +}; + +/** + * struct hw_fence_sync_signal_data - data used to signal fences. + * @hash: hash of the fence. + * @error_flag: error flag + */ +struct hw_fence_sync_signal_data { + u64 hash; + u32 error_flag; +}; + +/** + * struct hw_fence_sync_wait_data - data used to wait on fences. + * @fence: fence fd. + * @timeout_ms: fence wait time out. + */ +struct hw_fence_sync_wait_data { + __s32 fence; + u64 timeout_ms; +}; + +/** + * struct hw_fence_sync_reset_data - data used to reset client. + * @client_id: client id. 
+ * @reset_flag: reset flag + */ +struct hw_fence_sync_reset_data { + int client_id; + u32 reset_flag; +}; + +typedef long hw_fence_ioctl_t(struct hw_sync_obj *obj, unsigned long arg); + +/** + * struct hw_sync_ioctl_def - hw_sync driver ioctl entry + * @cmd: ioctl command number, without flags + * @func: handler for this ioctl + * @name: user-readable name for debug output + */ +struct hw_sync_ioctl_def { + unsigned int cmd; + hw_fence_ioctl_t *func; + const char *name; +}; + +static bool _is_valid_client(struct hw_sync_obj *obj) +{ + if (!obj) + return false; + + if (obj->client_id < HW_FENCE_CLIENT_ID_VAL0 || obj->client_id >= HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("invalid client_id:%d min:%d max:%d\n", obj->client_id, + HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_MAX); + return false; + } + + return true; +} + +static int _get_client_id(struct hw_sync_obj *obj, unsigned long arg) +{ + int client_id; + + if (copy_from_user(&client_id, (void __user *)arg, sizeof(client_id))) + return -EFAULT; + + if (!obj) + return -EINVAL; + + if (client_id < HW_FENCE_CLIENT_ID_VAL0 || client_id >= HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("invalid client_id:%d min:%d max:%d\n", client_id, + HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_MAX); + return -EINVAL; + } + + return client_id; +} + +static void *_hw_sync_get_fence(int fd) +{ + return fd >= 0 ? 
sync_file_get_fence(fd) : NULL; +} + +static int hw_sync_debugfs_open(struct inode *inode, struct file *file) +{ + struct hw_sync_obj *obj; + + obj = kzalloc(sizeof(*obj), GFP_KERNEL); + if (!obj) + return -ENOMEM; + + obj->context = dma_fence_context_alloc(1); + file->private_data = obj; + + return 0; +} + +static int hw_sync_debugfs_release(struct inode *inode, struct file *file) +{ + struct hw_sync_obj *obj = file->private_data; + + if (!obj) + return -EINVAL; + + kfree(obj); + + return 0; +} + +static long hw_sync_ioctl_reg_client(struct hw_sync_obj *obj, unsigned long arg) +{ + int client_id = _get_client_id(obj, arg); + + if (IS_ERR(&client_id)) { + return client_id; + } else if (obj->client_handle) { + HWFNC_ERR("client:%d already registered as validation client\n", client_id); + return -EINVAL; + } + + obj->client_id = client_id; + obj->client_handle = msm_hw_fence_register(obj->client_id, &obj->mem_descriptor); + if (IS_ERR_OR_NULL(obj->client_handle)) + return -EINVAL; + + return 0; +} + +static long hw_sync_ioctl_unreg_client(struct hw_sync_obj *obj, unsigned long arg) +{ + int client_id = _get_client_id(obj, arg); + + if (IS_ERR(&client_id)) + return client_id; + + return msm_hw_fence_deregister(obj->client_handle); +} + +static long hw_sync_ioctl_create_fence(struct hw_sync_obj *obj, unsigned long arg) +{ + struct msm_hw_fence_create_params params; + struct hw_fence_sync_create_data data; + struct hw_dma_fence *fence; + spinlock_t *fence_lock; + u64 hash; + struct sync_file *sync_file; + int fd, ret; + + if (!_is_valid_client(obj)) { + return -EINVAL; + } else if (IS_ERR_OR_NULL(obj->client_handle)) { + HWFNC_ERR("client:%d is not register as validation client\n", obj->client_id); + return -EINVAL; + } + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + + /* create dma fence */ + fence_lock = kzalloc(sizeof(*fence_lock), GFP_KERNEL); + if (!fence_lock) + return -ENOMEM; + + fence = kzalloc(sizeof(*fence), GFP_KERNEL); + 
if (!fence) { + kfree(fence_lock); + return -ENOMEM; + } + + snprintf(fence->name, HW_FENCE_NAME_SIZE, "hwfence:id:%d:ctx=%lu:seqno:%lu", + obj->client_id, obj->context, data.seqno); + + spin_lock_init(fence_lock); + dma_fence_init(&fence->base, &hw_fence_dbg_ops, fence_lock, obj->context, data.seqno); + + HWFNC_DBG_H("creating hw_fence for client:%d ctx:%llu seqno:%llu\n", obj->client_id, + obj->context, data.seqno); + params.fence = &fence->base; + params.handle = &hash; + + /* create hw fence */ + ret = msm_hw_fence_create(obj->client_handle, ¶ms); + if (ret) { + HWFNC_ERR("failed to create hw_fence for client:%d ctx:%llu seqno:%llu\n", + obj->client_id, obj->context, data.seqno); + dma_fence_put(&fence->base); + return -EINVAL; + } + + /* keep handle in dma_fence, to destroy hw-fence during release */ + fence->client_handle = obj->client_handle; + + if (data.incr_context) + obj->context = dma_fence_context_alloc(1); + + /* create fd */ + fd = get_unused_fd_flags(0); + if (fd < 0) { + HWFNC_ERR("failed to get fd for client:%d\n", obj->client_id); + dma_fence_put(&fence->base); + return fd; + } + + sync_file = sync_file_create(&fence->base); + if (sync_file == NULL) { + HWFNC_ERR("couldn't create fence fd, %d\n", fd); + dma_fence_put(&fence->base); + ret = -EINVAL; + goto exit; + } + + /* Decrement the refcount that sync_file_create increments */ + dma_fence_put(&fence->base); + + data.fence = fd; + data.hash = hash; + if (copy_to_user((void __user *)arg, &data, sizeof(data))) { + dma_fence_put(&fence->base); + fput(sync_file->file); + ret = -EFAULT; + goto exit; + } + + fd_install(fd, sync_file->file); + + return 0; + +exit: + put_unused_fd(fd); + return ret; +} + +static long hw_sync_ioctl_destroy_fence(struct hw_sync_obj *obj, unsigned long arg) +{ + int fd; + struct hw_dma_fence *fence; + struct hw_fence_sync_create_data data; + + if (!_is_valid_client(obj)) + return -EINVAL; + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + 
+ fd = data.fence; + fence = (struct hw_dma_fence *)_hw_sync_get_fence(fd); + + if (!fence) { + HWFNC_ERR("fence for fd:%d not found\n", fd); + return -EINVAL; + } + + /* Decrement the refcount that hw_sync_get_fence increments */ + dma_fence_put(&fence->base); + + /* To destroy fence */ + dma_fence_put(&fence->base); + + return 0; +} + +static long hw_sync_ioctl_create_fence_array(struct hw_sync_obj *obj, unsigned long arg) +{ + struct dma_fence_array *fence_array; + struct hw_fence_array_sync_create_data data; + struct dma_fence **fences = NULL; + struct msm_hw_fence_create_params params; + struct sync_file *sync_file; + spinlock_t **fence_lock = NULL; + int num_fences, i, fd, ret; + u64 hash; + + if (!_is_valid_client(obj)) { + return -EINVAL; + } else if (IS_ERR_OR_NULL(obj->client_handle)) { + HWFNC_ERR("client:%d is not register as validation client\n", obj->client_id); + return -EINVAL; + } + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + + num_fences = data.num_fences; + if (num_fences >= HW_FENCE_ARRAY_SIZE) { + HWFNC_ERR("Number of fences: %d is greater than allowed size: %d\n", + num_fences, HW_FENCE_ARRAY_SIZE); + return -EINVAL; + } + fence_lock = kcalloc(num_fences, sizeof(*fence_lock), GFP_KERNEL); + if (!fence_lock) + return -ENOMEM; + + fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL); + if (!fences) { + kfree(fence_lock); + return -ENOMEM; + } + + /* + * Create the array of dma fences + * This API takes seqno[num_fences] as the seqno for the fence-array + * and from 0 to (num_fences - 1) for the fences in the array. 
+ */ + for (i = 0; i < num_fences; i++) { + struct hw_dma_fence *dma_fence; + + fence_lock[i] = kzalloc(sizeof(spinlock_t), GFP_KERNEL); + if (!fence_lock[i]) { + _cleanup_fences(i, fences, fence_lock); + return -ENOMEM; + } + + dma_fence = kzalloc(sizeof(*dma_fence), GFP_KERNEL); + if (!dma_fence) { + _cleanup_fences(i, fences, fence_lock); + return -ENOMEM; + } + fences[i] = &dma_fence->base; + + spin_lock_init(fence_lock[i]); + dma_fence_init(fences[i], &hw_fence_dbg_ops, fence_lock[i], + obj->context, data.seqno[i]); + } + + /* create the fence array from array of dma fences */ + fence_array = dma_fence_array_create(num_fences, fences, obj->context, data.seqno[i], 0); + if (!fence_array) { + HWFNC_ERR("Error creating fence_array\n"); + _cleanup_fences(num_fences - 1, fences, fence_lock); + return -EINVAL; + } + + /* create hw fences */ + for (i = 0; i < num_fences; i++) { + params.fence = fences[i]; + params.handle = &hash; + + ret = msm_hw_fence_create(obj->client_handle, ¶ms); + if (ret) { + HWFNC_ERR("Error creating HW fence\n"); + dma_fence_put(&fence_array->base); + /* + * free array of pointers, no need to call kfree in 'fences', + * since that is released from the fence-array release api + */ + kfree(fence_lock); + kfree(fence_array); + return -EINVAL; + } + + /* keep handle in dma_fence, to destroy hw-fence during release */ + to_hw_dma_fence(fences[i])->client_handle = obj->client_handle; + data.hash[i] = hash; + } + + /* create fd */ + fd = get_unused_fd_flags(0); + if (fd < 0) { + HWFNC_ERR("failed to get fd for client:%d\n", obj->client_id); + dma_fence_put(&fence_array->base); + kfree(fence_lock); + kfree(fence_array); + return fd; + } + + sync_file = sync_file_create(&fence_array->base); + if (sync_file == NULL) { + HWFNC_ERR("couldn't create fence fd, %d\n", fd); + dma_fence_put(&fence_array->base); + kfree(fence_lock); + kfree(fence_array); + ret = -EINVAL; + goto exit; + } + + /* Decrement the refcount that sync_file_create increments */ + 
dma_fence_put(&fence_array->base); + + data.fence = fd; + if (copy_to_user((void __user *)arg, &data, sizeof(data))) { + fput(sync_file->file); + dma_fence_put(&fence_array->base); + kfree(fence_lock); + kfree(fence_array); + ret = -EFAULT; + goto exit; + } + + fd_install(fd, sync_file->file); + + return 0; + +exit: + put_unused_fd(fd); + return ret; +} + +static long hw_sync_ioctl_destroy_fence_array(struct hw_sync_obj *obj, unsigned long arg) +{ + struct dma_fence_array *fence_array; + struct dma_fence *fence; + struct hw_fence_array_sync_create_data data; + int fd; + + if (!_is_valid_client(obj)) + return -EINVAL; + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + + fd = data.fence; + fence = (struct dma_fence *)_hw_sync_get_fence(fd); + if (!fence) { + HWFNC_ERR("Invalid fence fd: %d\n", fd); + return -EINVAL; + } + + /* Decrement the refcount that hw_sync_get_fence increments */ + dma_fence_put(fence); + + fence_array = to_dma_fence_array(fence); + if (!fence_array) { + HWFNC_ERR("Invalid fence array fd: %d\n", fd); + return -EINVAL; + } + + /* Destroy fence array */ + dma_fence_put(&fence_array->base); + + return 0; +} + +/* + * this IOCTL only supports receiving one fence as input-parameter, which can be + * either a "dma_fence" or a "dma_fence_array", but eventually we would expand + * this API to receive more fences + */ +static long hw_sync_ioctl_reg_for_wait(struct hw_sync_obj *obj, unsigned long arg) +{ + struct dma_fence *fence; + int ret, fd, num_fences = 1; + + if (!_is_valid_client(obj)) + return -EINVAL; + + if (copy_from_user(&fd, (void __user *)arg, sizeof(fd))) + return -EFAULT; + + fence = (struct dma_fence *)_hw_sync_get_fence(fd); + if (!fence) { + HWFNC_ERR("Invalid fence fd: %d\n", fd); + return -EINVAL; + } + + ret = msm_hw_fence_wait_update(obj->client_handle, &fence, num_fences, 1); + + /* Decrement the refcount that hw_sync_get_fence increments */ + dma_fence_put(fence); + + return ret; +} + +static 
long hw_sync_ioctl_fence_signal(struct hw_sync_obj *obj, unsigned long arg) +{ + struct hw_fence_sync_signal_data data; + int ret, tx_client, rx_client, signal_id; + + if (!_is_valid_client(obj)) { + return -EINVAL; + } else if (IS_ERR_OR_NULL(obj->client_handle)) { + HWFNC_ERR("invalid client handle for the client_id: %d\n", obj->client_id); + return -EINVAL; + } + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + + ret = msm_hw_fence_update_txq(obj->client_handle, data.hash, 0, data.error_flag); + if (ret) { + HWFNC_ERR("hw fence update txq has failed client_id: %d\n", obj->client_id); + return ret; + } + + signal_id = dbg_out_clients_signal_map_no_dpu[obj->client_id].ipc_signal_id; + if (signal_id < 0) + return -EINVAL; + + tx_client = HW_FENCE_IPC_CLIENT_ID_APPS; + rx_client = HW_FENCE_IPC_CLIENT_ID_APPS; + ret = msm_hw_fence_trigger_signal(obj->client_handle, tx_client, rx_client, signal_id); + if (ret) { + HWFNC_ERR("hw fence trigger signal has failed\n"); + return ret; + } + + return 0; +} + +static long hw_sync_ioctl_fence_wait(struct hw_sync_obj *obj, unsigned long arg) +{ + struct msm_hw_fence_client *hw_fence_client; + struct msm_hw_fence_queue_payload payload; + struct hw_fence_sync_wait_data data; + struct dma_fence *fence; + int fd, ret, read = 1, queue_type = HW_FENCE_RX_QUEUE - 1; /* rx queue index */ + + if (!_is_valid_client(obj)) + return -EINVAL; + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + + fd = data.fence; + fence = (struct dma_fence *)_hw_sync_get_fence(fd); + if (!fence) { + HWFNC_ERR("Invalid fence fd: %d\n", fd); + return -EINVAL; + } + + hw_fence_client = (struct msm_hw_fence_client *)obj->client_handle; + if (!hw_fence_client) { + HWFNC_ERR("invalid client handle for fd:%d\n", fd); + /* Decrement the refcount that hw_sync_get_fence increments */ + dma_fence_put(fence); + return -EINVAL; + } + + ret = wait_event_timeout(hw_fence_client->wait_queue, + 
atomic_read(&hw_fence_client->val_signal) > 0, + msecs_to_jiffies(data.timeout_ms)); + if (!ret) { + HWFNC_ERR("timed out waiting for the client signal %d\n", data.timeout_ms); + /* Decrement the refcount that hw_sync_get_fence increments */ + dma_fence_put(fence); + return -ETIMEDOUT; + } + + /* clear doorbell signal flag */ + atomic_set(&hw_fence_client->val_signal, 0); + + while (read) { + read = hw_fence_read_queue(obj->client_handle, &payload, queue_type); + if (read < 0) { + HWFNC_ERR("unable to read client rxq client_id:%d\n", obj->client_id); + break; + } + HWFNC_DBG_L("rxq read: hash:%llu, flags:%llu, error:%lu\n", + payload.hash, payload.flags, payload.error); + if (payload.ctxt_id == fence->context && payload.seqno == fence->seqno) { + /* Decrement the refcount that hw_sync_get_fence increments */ + dma_fence_put(fence); + return 0; + } + } + + /* Decrement the refcount that hw_sync_get_fence increments */ + dma_fence_put(fence); + + HWFNC_ERR("fence received did not match the fence expected\n"); + HWFNC_ERR("fence received: context:%d seqno:%d fence expected: context:%d seqno:%d\n", + payload.ctxt_id, payload.seqno, fence->context, fence->seqno); + + return read; +} + +static long hw_sync_ioctl_reset_client(struct hw_sync_obj *obj, unsigned long arg) +{ + int ret; + struct hw_fence_sync_reset_data data; + + if (!_is_valid_client(obj)) { + return -EINVAL; + } else if (IS_ERR_OR_NULL(obj->client_handle)) { + HWFNC_ERR("client:%d handle doesn't exists\n", data.client_id); + return -EINVAL; + } + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + + ret = msm_hw_fence_reset_client(obj->client_handle, data.reset_flag); + if (ret) { + HWFNC_ERR("hw fence reset client has failed\n"); + return ret; + } + + return 0; +} + +static const struct hw_sync_ioctl_def hw_sync_debugfs_ioctls[] = { + HW_IOCTL_DEF(HW_SYNC_IOC_REG_CLIENT, hw_sync_ioctl_reg_client), + HW_IOCTL_DEF(HW_SYNC_IOC_UNREG_CLIENT, hw_sync_ioctl_unreg_client), + 
HW_IOCTL_DEF(HW_SYNC_IOC_CREATE_FENCE, hw_sync_ioctl_create_fence), + HW_IOCTL_DEF(HW_SYNC_IOC_DESTROY_FENCE, hw_sync_ioctl_destroy_fence), + HW_IOCTL_DEF(HW_SYNC_IOC_CREATE_FENCE_ARRAY, hw_sync_ioctl_create_fence_array), + HW_IOCTL_DEF(HW_SYNC_IOC_DESTROY_FENCE_ARRAY, hw_sync_ioctl_destroy_fence_array), + HW_IOCTL_DEF(HW_SYNC_IOC_REG_FOR_WAIT, hw_sync_ioctl_reg_for_wait), + HW_IOCTL_DEF(HW_SYNC_IOC_FENCE_SIGNAL, hw_sync_ioctl_fence_signal), + HW_IOCTL_DEF(HW_SYNC_IOC_FENCE_WAIT, hw_sync_ioctl_fence_wait), + HW_IOCTL_DEF(HW_SYNC_IOC_RESET_CLIENT, hw_sync_ioctl_reset_client) +}; + +static long hw_sync_debugfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct hw_sync_obj *obj = file->private_data; + int num = HW_FENCE_IOCTL_NR(cmd); + hw_fence_ioctl_t *func; + + if (num >= HW_SYNC_IOCTL_COUNT) { + HWFNC_ERR("invalid ioctl num = %d\n", num); + return -EINVAL; + } + + func = (&hw_sync_debugfs_ioctls[num])->func; + if (unlikely(!func)) { + HWFNC_ERR("no function num = %d\n", num); + return -ENOTTY; + } + + return func(obj, arg); +} + +const struct file_operations hw_sync_debugfs_fops = { + .open = hw_sync_debugfs_open, + .release = hw_sync_debugfs_release, + .unlocked_ioctl = hw_sync_debugfs_ioctl, +}; diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index e2e61947c4..9d34aa7ea1 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -98,6 +98,10 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id, hw_fence_client, hw_fence_client->client_id, hw_fence_client->ipc_signal_id, hw_fence_client->ipc_client_id); +#if IS_ENABLED(CONFIG_DEBUG_FS) + init_waitqueue_head(&hw_fence_client->wait_queue); +#endif /* CONFIG_DEBUG_FS */ + return (void *)hw_fence_client; error: From c11e6e06aad06feb8c85a3de77bf23da58044d90 Mon Sep 17 00:00:00 2001 From: Christina Oliveira Date: Wed, 29 Jun 2022 15:04:42 -0700 Subject: [PATCH 016/166] mm-drivers: sync: export sync_fence module symbols This change updates 
makefile to export sync_fence module symbols, so these can be imported by other external kernel modules. Change-Id: Idd64fae8f8797cbcb4b4012666ed5621fa83062f Signed-off-by: Christina Oliveira --- sync_fence/Android.mk | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/sync_fence/Android.mk b/sync_fence/Android.mk index 59ee256f05..d784b18e9c 100644 --- a/sync_fence/Android.mk +++ b/sync_fence/Android.mk @@ -18,6 +18,15 @@ KBUILD_OPTIONS := SYNC_FENCE_ROOT=$(SYNC_FENCE_BLD_DIR) KBUILD_OPTIONS += MODNAME=sync_fence KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM) +########################################################### +include $(CLEAR_VARS) +LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) +LOCAL_MODULE := sync-fence-module-symvers +LOCAL_MODULE_STEM := Module.symvers +LOCAL_MODULE_KBUILD_NAME := Module.symvers +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) + +include $(DLKM_DIR)/Build_external_kernelmodule.mk ########################################################### include $(CLEAR_VARS) LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) From 0a5f30607f23bcc190dc8790288de0fa7fab2973 Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 29 Jun 2022 10:18:02 -0700 Subject: [PATCH 017/166] mm-drivers: hw_fence: enable hw-fence driver based on cmdline var This change ensures that the hw-fence driver is disabled by default and can be enabled or disabled based on a kernel command line argument. If the hw-fence driver is disabled, msm_hw_fence_probe returns an error. 
Change-Id: I248f29158c17a43151aa8b0c980a7ce0f5e758d6 Signed-off-by: Grace An --- hw_fence/src/msm_hw_fence.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 0c8fd65d60..71288b7919 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -15,6 +15,7 @@ #include "hw_fence_drv_ipc.h" struct hw_fence_driver_data *hw_fence_drv_data; +static bool hw_fence_driver_enable; void *msm_hw_fence_register(enum hw_fence_client_id client_id, struct msm_hw_fence_mem_addr *mem_descriptor) @@ -420,6 +421,11 @@ static int msm_hw_fence_probe(struct platform_device *pdev) return -EINVAL; } + if (!hw_fence_driver_enable) { + HWFNC_DBG_INFO("hw fence driver not enabled\n"); + return -EOPNOTSUPP; + } + if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-hw-fence")) rc = msm_hw_fence_probe_init(pdev); if (rc) @@ -499,6 +505,9 @@ static void __exit msm_hw_fence_exit(void) HWFNC_DBG_H("-\n"); } +module_param_named(enable, hw_fence_driver_enable, bool, 0600); +MODULE_PARM_DESC(enable, "Enable hardware fences"); + module_init(msm_hw_fence_init); module_exit(msm_hw_fence_exit); From 8ad0ce90d22bd18576af5d34403ff001b1c5eb4b Mon Sep 17 00:00:00 2001 From: Bruce Hoo Date: Mon, 9 May 2022 18:56:17 +0800 Subject: [PATCH 018/166] mm-drivers: spec_fence: increasing device_available Spec_fence device is already used by surfaceflinger, increase device_available by 1. 
Change-Id: I3795ffc40fb2ca95e933d4ed056dc9a4c628ba1e Signed-off-by: Bruce Hoo --- sync_fence/src/qcom_sync_file.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/sync_fence/src/qcom_sync_file.c b/sync_fence/src/qcom_sync_file.c index b3ecf4eb1f..f054f80e34 100644 --- a/sync_fence/src/qcom_sync_file.c +++ b/sync_fence/src/qcom_sync_file.c @@ -105,14 +105,17 @@ static void clear_fence_array_tracker(bool force_clear) static struct sync_device *spec_fence_init_locked(struct sync_device *obj, const char *name) { - if (atomic_read(&obj->device_available)) + if (atomic_read(&obj->device_available) > 1) { + pr_err("number of device fds are limited by 2, device opened:%d\n", + atomic_read(&obj->device_available)); return NULL; + } else if (!atomic_read(&obj->device_available)) { + memset(obj->name, 0, NAME_LEN); + strscpy(obj->name, name, sizeof(obj->name)); + } atomic_inc(&obj->device_available); - memset(obj->name, 0, NAME_LEN); - strlcpy(obj->name, name, sizeof(obj->name)); - return obj; } @@ -153,14 +156,16 @@ static int spec_sync_release(struct inode *inode, struct file *file) mutex_lock(&sync_dev.lock); if (!atomic_read(&obj->device_available)) { - pr_err("sync release failed !!\n"); + pr_err("no device to release!!\n"); ret = -ENODEV; goto end; } - clear_fence_array_tracker(true); atomic_dec(&obj->device_available); + if (!atomic_read(&obj->device_available)) + clear_fence_array_tracker(true); + end: mutex_unlock(&sync_dev.lock); return ret; From 93afde537dddfaab1a96eacf1e515a3396754930 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Tue, 21 Jun 2022 09:45:18 -0700 Subject: [PATCH 019/166] mm-drivers: hw_fence: move mem barrier before mem read Move memory barriers to ensure data is available before the read of indexes from the queues. 
Change-Id: I3b5a7903f038cc62b461fbfc9cbeb143b862a1f1 Signed-off-by: Ingrid Gallardo --- hw_fence/src/hw_fence_drv_priv.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index cdfe9a81c2..a980fb6bfa 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -166,13 +166,13 @@ int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, return -EINVAL; } + /* Make sure data is ready before read */ + mb(); + /* Get read and write index */ read_idx = readl_relaxed(&hfi_header->read_index); write_idx = readl_relaxed(&hfi_header->write_index); - /* Make sure we read the values */ - rmb(); - HWFNC_DBG_Q("read client:%d rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d queue:0x%pK\n", hw_fence_client->client_id, &hfi_header->read_index, &hfi_header->write_index, read_idx, write_idx, queue); @@ -274,13 +274,13 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, GLOBAL_ATOMIC_STORE(&drv_data->client_lock_tbl[lock_idx], 1); /* lock */ } + /* Make sure data is ready before read */ + mb(); + /* Get read and write index */ read_idx = readl_relaxed(&hfi_header->read_index); write_idx = readl_relaxed(&hfi_header->write_index); - /* Make sure we read the values */ - rmb(); - HWFNC_DBG_Q("wr client:%d rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d q:0x%pK type:%d\n", hw_fence_client->client_id, &hfi_header->read_index, &hfi_header->write_index, read_idx, write_idx, queue, queue_type); From 05b50f9290f4ab2f425b7a03b648207b2e59574a Mon Sep 17 00:00:00 2001 From: Ashwin Pillai Date: Thu, 14 Jul 2022 16:50:18 -0400 Subject: [PATCH 020/166] mm-drivers: add support for build.sh techpack display_tp add environment variable to be used by display-techpack.mk for build.sh techpack display_tp. 
Change-Id: I46b0ac3fb40371e3282191c75a501230243d9f52 Signed-off-by: Ashwin Pillai --- mm_driver_product.mk | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mm_driver_product.mk b/mm_driver_product.mk index 4c2a5d2fe9..bb98492d0a 100644 --- a/mm_driver_product.mk +++ b/mm_driver_product.mk @@ -13,3 +13,5 @@ ifeq ($(MM_DRV_DLKM_ENABLE), true) PRODUCT_PACKAGES += sync_fence.ko msm_hw_fence.ko endif endif + +DISPLAY_MM_DRIVER := msm_ext_display.ko sync_fence.ko msm_hw_fence.ko \ No newline at end of file From c344a18254ddcb9beb242026b327a09c82f3ef3d Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Wed, 1 Jun 2022 12:12:46 -0700 Subject: [PATCH 021/166] mm-drivers: hw_fence: add timestamp to the queue Add qtimer timestamps to queue payloads. This timestamp is to be updated by the client that adds the entry to the queue. Change-Id: I69dd4420ec18b7470f99d5cfe46129c10b3f3391 Signed-off-by: Ingrid Gallardo --- hw_fence/include/hw_fence_drv_priv.h | 3 +- hw_fence/src/hw_fence_drv_debug.c | 86 ++++++++++++++++++++++++++++ hw_fence/src/hw_fence_drv_priv.c | 1 + 3 files changed, 89 insertions(+), 1 deletion(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 8de604ee0d..8ce864639c 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -326,6 +326,7 @@ struct hw_fence_driver_data { * @error: error code for this fence, fence controller receives this * error from the signaling client through the tx queue and * propagates the error to the waiting client through rx queue + * @timestamp: qtime when the payload is written into the queue */ struct msm_hw_fence_queue_payload { u64 ctxt_id; @@ -333,7 +334,7 @@ struct msm_hw_fence_queue_payload { u64 hash; u64 flags; u32 error; - u32 unused; /* align to 64-bit */ + u32 timestamp; }; /** diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index f872c4c197..a1e80ace58 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ 
b/hw_fence/src/hw_fence_drv_debug.c @@ -536,6 +536,85 @@ static int dump_full_table(struct hw_fence_driver_data *drv_data, char *buf, u32 return len; } +/** + * hw_fence_dbg_dump_queues_wr() - debugfs wr to dump the hw-fences queues. + * @file: file handler. + * @user_buf: user buffer content for debugfs. + * @count: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs dumps the hw-fence queues. Takes as input the desired client to dump. + * Dumps to debug msgs the contents of the TX and RX queues for that client, if they exist. + */ +static ssize_t hw_fence_dbg_dump_queues_wr(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct hw_fence_driver_data *drv_data; + struct msm_hw_fence_queue *rx_queue; + struct msm_hw_fence_queue *tx_queue; + u64 hash, ctx_id, seqno, timestamp, flags; + u32 *read_ptr, error; + int client_id, i; + struct msm_hw_fence_queue_payload *read_ptr_payload; + + if (!file || !file->private_data) { + HWFNC_ERR("unexpected data %d\n", file); + return -EINVAL; + } + drv_data = file->private_data; + + client_id = _get_debugfs_input_client(file, user_buf, count, ppos, &drv_data); + if (client_id < 0) + return -EINVAL; + + if (!drv_data->clients[client_id] || + IS_ERR_OR_NULL(&drv_data->clients[client_id]->queues[HW_FENCE_RX_QUEUE - 1]) || + IS_ERR_OR_NULL(&drv_data->clients[client_id]->queues[HW_FENCE_TX_QUEUE - 1])) { + HWFNC_ERR("client %d not initialized\n", client_id); + return -EINVAL; + } + + HWFNC_DBG_L("Queues for client %d\n", client_id); + + rx_queue = &drv_data->clients[client_id]->queues[HW_FENCE_RX_QUEUE - 1]; + tx_queue = &drv_data->clients[client_id]->queues[HW_FENCE_TX_QUEUE - 1]; + + HWFNC_DBG_L("-------RX QUEUE------\n"); + for (i = 0; i < drv_data->hw_fence_queue_entries; i++) { + read_ptr = ((u32 *)rx_queue->va_queue + + (i * (sizeof(struct msm_hw_fence_queue_payload) / sizeof(u32)))); + read_ptr_payload = (struct msm_hw_fence_queue_payload *)read_ptr; + 
+ ctx_id = readq_relaxed(&read_ptr_payload->ctxt_id); + seqno = readq_relaxed(&read_ptr_payload->seqno); + hash = readq_relaxed(&read_ptr_payload->hash); + flags = readq_relaxed(&read_ptr_payload->flags); + error = readl_relaxed(&read_ptr_payload->error); + timestamp = readl_relaxed(&read_ptr_payload->timestamp); + + HWFNC_DBG_L("rx[%d]: hash:%d ctx:%llu seqno:%llu f:%llu err:%u time:%u\n", + i, hash, ctx_id, seqno, flags, error, timestamp); + } + + HWFNC_DBG_L("-------TX QUEUE------\n"); + for (i = 0; i < drv_data->hw_fence_queue_entries; i++) { + read_ptr = ((u32 *)tx_queue->va_queue + + (i * (sizeof(struct msm_hw_fence_queue_payload) / sizeof(u32)))); + read_ptr_payload = (struct msm_hw_fence_queue_payload *)read_ptr; + + ctx_id = readq_relaxed(&read_ptr_payload->ctxt_id); + seqno = readq_relaxed(&read_ptr_payload->seqno); + hash = readq_relaxed(&read_ptr_payload->hash); + flags = readq_relaxed(&read_ptr_payload->flags); + error = readl_relaxed(&read_ptr_payload->error); + timestamp = readl_relaxed(&read_ptr_payload->timestamp); + HWFNC_DBG_L("tx[%d]: hash:%d ctx:%llu seqno:%llu f:%llu err:%u time:%u\n", + i, hash, ctx_id, seqno, flags, error, timestamp); + } + + return count; +} + /** * hw_fence_dbg_dump_table_rd() - debugfs read to dump the hw-fences table. * @file: file handler. 
@@ -862,6 +941,11 @@ static const struct file_operations hw_fence_dump_table_fops = { .read = hw_fence_dbg_dump_table_rd, }; +static const struct file_operations hw_fence_dump_queues_fops = { + .open = simple_open, + .write = hw_fence_dbg_dump_queues_wr, +}; + static const struct file_operations hw_fence_create_join_fence_fops = { .open = simple_open, .write = hw_fence_dbg_create_join_fence, @@ -908,6 +992,8 @@ int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data) debugfs_create_u32("hw_fence_debug_level", 0600, debugfs_root, &msm_hw_fence_debug_level); debugfs_create_file("hw_fence_dump_table", 0600, debugfs_root, drv_data, &hw_fence_dump_table_fops); + debugfs_create_file("hw_fence_dump_queues", 0600, debugfs_root, drv_data, + &hw_fence_dump_queues_fops); debugfs_create_file("hw_sync", 0600, debugfs_root, NULL, &hw_sync_debugfs_fops); return 0; diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index d2db557c4e..c88d8cf1a5 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -324,6 +324,7 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, writeq_relaxed(hash, &write_ptr_payload->hash); writeq_relaxed(flags, &write_ptr_payload->flags); writel_relaxed(error, &write_ptr_payload->error); + writel_relaxed(hw_fence_get_qtime(drv_data), &write_ptr_payload->timestamp); /* update memory for the message */ wmb(); From b30002d731cae7cf1c2fef522ec1f769ab4053ef Mon Sep 17 00:00:00 2001 From: Grace An Date: Fri, 29 Jul 2022 14:58:06 -0700 Subject: [PATCH 022/166] mm-drivers: hw_fence: add bounds check for hw fence deregistration Ensure that clients deregister hardware fences for client ids strictly less than HW_FENCE_CLIENT_MAX. This prevents out of bounds array accesses. 
Change-Id: I3453135cfd7a74373421d8db32c3ecb0fffc70d0 Signed-off-by: Grace An --- hw_fence/src/msm_hw_fence.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 9df871e05a..0f693be07b 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -124,6 +124,11 @@ int msm_hw_fence_deregister(void *client_handle) } hw_fence_client = (struct msm_hw_fence_client *)client_handle; + if (hw_fence_client->client_id >= HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("Invalid client_id:%d\n", hw_fence_client->client_id); + return -EINVAL; + } + HWFNC_DBG_H("+\n"); /* Free all the allocated resources */ From f168780f740baaeb889d7d2e29770082ebcca3d7 Mon Sep 17 00:00:00 2001 From: Manoj Kumar AVM Date: Wed, 3 Aug 2022 23:25:46 -0700 Subject: [PATCH 023/166] mm-drivers: hw-fence: fix static analysis issue Fix static analysis issue where uninitialized variable is being accessed. Change-Id: Iab6210fb4c67f35c7f1bada592800c10f0ad76bc Signed-off-by: Manoj Kumar AVM --- hw_fence/src/hw_fence_ioctl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hw_fence/src/hw_fence_ioctl.c b/hw_fence/src/hw_fence_ioctl.c index 8ff2bdfb02..3ccd2dd7de 100644 --- a/hw_fence/src/hw_fence_ioctl.c +++ b/hw_fence/src/hw_fence_ioctl.c @@ -655,7 +655,7 @@ static long hw_sync_ioctl_reset_client(struct hw_sync_obj *obj, unsigned long ar if (!_is_valid_client(obj)) { return -EINVAL; } else if (IS_ERR_OR_NULL(obj->client_handle)) { - HWFNC_ERR("client:%d handle doesn't exists\n", data.client_id); + HWFNC_ERR("client:%d handle doesn't exists\n", obj->client_id); return -EINVAL; } From 99948e971483dcbd72eba32a14e4f114da3d4472 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 26 Jul 2022 17:32:10 -0700 Subject: [PATCH 024/166] mm-drivers: hw_fence: modify hw fence queue payload structure Add size, type, version, and client_data fields to hw fence queue payload and update 32-bit timestamp field to full 64-bit timestamp field. 
Change-Id: Iafb0eb80f83acd5753786fa50a31c1fb74f1a2fa Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 29 ++++++++++++++++++++++++++-- hw_fence/src/hw_fence_drv_debug.c | 10 ++++++---- hw_fence/src/hw_fence_drv_priv.c | 17 +++++++++++----- 3 files changed, 45 insertions(+), 11 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 8ce864639c..1efc41cc41 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -69,6 +69,12 @@ */ #define MSM_HW_FENCE_MAX_JOIN_PARENTS 3 +/** + * HW_FENCE_PAYLOAD_REV: + * Payload version with major and minor version information + */ +#define HW_FENCE_PAYLOAD_REV(major, minor) (major << 8 | (minor & 0xFF)) + enum hw_fence_lookup_ops { HW_FENCE_LOOKUP_OP_CREATE = 0x1, HW_FENCE_LOOKUP_OP_DESTROY, @@ -129,6 +135,13 @@ struct msm_hw_fence_queue { phys_addr_t pa_queue; }; +/** + * enum payload_type - Enum with the queue payload types. + */ +enum payload_type { + HW_FENCE_PAYLOAD_TYPE_1 = 1 +}; + /** * struct msm_hw_fence_client - Structure holding the per-Client allocated resources. * @client_id: id of the client @@ -319,22 +332,34 @@ struct hw_fence_driver_data { /** * struct msm_hw_fence_queue_payload - hardware fence clients queues payload. + * @size: size of queue payload + * @type: type of queue payload + * @version: version of queue payload. 
High eight bits are for major and lower eight + * bits are for minor version * @ctxt_id: context id of the dma fence * @seqno: sequence number of the dma fence * @hash: fence hash * @flags: see MSM_HW_FENCE_FLAG_* flags descriptions + * @client_data: data passed from and returned to waiting client upon fence signaling * @error: error code for this fence, fence controller receives this * error from the signaling client through the tx queue and * propagates the error to the waiting client through rx queue - * @timestamp: qtime when the payload is written into the queue + * @timestamp_lo: low 32-bits of qtime of when the payload is written into the queue + * @timestamp_hi: high 32-bits of qtime of when the payload is written into the queue */ struct msm_hw_fence_queue_payload { + u32 size; + u16 type; + u16 version; u64 ctxt_id; u64 seqno; u64 hash; u64 flags; + u64 client_data; u32 error; - u32 timestamp; + u32 timestamp_lo; + u32 timestamp_hi; + u32 reserve; }; /** diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index a1e80ace58..28674dcca9 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -590,9 +590,10 @@ static ssize_t hw_fence_dbg_dump_queues_wr(struct file *file, const char __user hash = readq_relaxed(&read_ptr_payload->hash); flags = readq_relaxed(&read_ptr_payload->flags); error = readl_relaxed(&read_ptr_payload->error); - timestamp = readl_relaxed(&read_ptr_payload->timestamp); + timestamp = (u64)readl_relaxed(&read_ptr_payload->timestamp_lo) | + ((u64)readl_relaxed(&read_ptr_payload->timestamp_hi) << 32); - HWFNC_DBG_L("rx[%d]: hash:%d ctx:%llu seqno:%llu f:%llu err:%u time:%u\n", + HWFNC_DBG_L("rx[%d]: hash:%d ctx:%llu seqno:%llu f:%llu err:%u time:%llu\n", i, hash, ctx_id, seqno, flags, error, timestamp); } @@ -607,8 +608,9 @@ static ssize_t hw_fence_dbg_dump_queues_wr(struct file *file, const char __user hash = readq_relaxed(&read_ptr_payload->hash); flags = 
readq_relaxed(&read_ptr_payload->flags); error = readl_relaxed(&read_ptr_payload->error); - timestamp = readl_relaxed(&read_ptr_payload->timestamp); - HWFNC_DBG_L("tx[%d]: hash:%d ctx:%llu seqno:%llu f:%llu err:%u time:%u\n", + timestamp = (u64)readl_relaxed(&read_ptr_payload->timestamp_lo) | + ((u64)readl_relaxed(&read_ptr_payload->timestamp_hi) << 32); + HWFNC_DBG_L("tx[%d]: hash:%d ctx:%llu seqno:%llu f:%llu err:%u time:%llu\n", i, hash, ctx_id, seqno, flags, error, timestamp); } diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index c88d8cf1a5..f5bc6f3198 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -112,10 +112,10 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, queues[i].pa_queue = qphys; queues[i].va_header = hfi_queue_header; queues[i].q_size_bytes = queue_size; - HWFNC_DBG_INIT("init:%s client:%d queue[%d]: va=0x%pK pa=0x%x va_hd:0x%pK sz:%d\n", + HWFNC_DBG_INIT("init:%s client:%d q[%d] va=0x%pK pa=0x%x hd:0x%pK sz:%u pkt:%d\n", hfi_queue_header->type == HW_FENCE_TX_QUEUE ? 
"TX_QUEUE" : "RX_QUEUE", client_id, i, queues[i].va_queue, queues[i].pa_queue, queues[i].va_header, - queues[i].q_size_bytes); + queues[i].q_size_bytes, payload_size); /* Next header */ hfi_queue_header++; @@ -232,10 +232,11 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, u32 q_size_u32; u32 q_free_u32; u32 *q_payload_write_ptr; - u32 payload_size_u32; + u32 payload_size, payload_size_u32; struct msm_hw_fence_queue_payload *write_ptr_payload; bool lock_client = false; u32 lock_idx; + u64 timestamp; int ret = 0; if (queue_type >= HW_FENCE_CLIENT_QUEUES) { @@ -247,7 +248,8 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, hfi_header = queue->va_header; q_size_u32 = (queue->q_size_bytes / sizeof(u32)); - payload_size_u32 = (sizeof(struct msm_hw_fence_queue_payload) / sizeof(u32)); + payload_size = sizeof(struct msm_hw_fence_queue_payload); + payload_size_u32 = (payload_size / sizeof(u32)); if (!hfi_header) { HWFNC_ERR("Invalid queue\n"); @@ -319,12 +321,17 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, to_write_idx = 0; /* Update Client Queue */ + writeq_relaxed(payload_size, &write_ptr_payload->size); + writew_relaxed(HW_FENCE_PAYLOAD_TYPE_1, &write_ptr_payload->type); + writew_relaxed(HW_FENCE_PAYLOAD_REV(1, 0), &write_ptr_payload->version); writeq_relaxed(ctxt_id, &write_ptr_payload->ctxt_id); writeq_relaxed(seqno, &write_ptr_payload->seqno); writeq_relaxed(hash, &write_ptr_payload->hash); writeq_relaxed(flags, &write_ptr_payload->flags); writel_relaxed(error, &write_ptr_payload->error); - writel_relaxed(hw_fence_get_qtime(drv_data), &write_ptr_payload->timestamp); + timestamp = hw_fence_get_qtime(drv_data); + writel_relaxed(timestamp, &write_ptr_payload->timestamp_lo); + writel_relaxed(timestamp >> 32, &write_ptr_payload->timestamp_hi); /* update memory for the message */ wmb(); From f4e1ed257858d422cbfc0277997ccd45b5898229 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Wed, 24 Aug 2022 14:26:45 
-0700 Subject: [PATCH 025/166] mm-drivers: hw_fence: avoid signal during reset for signaled hw fences During a client reset, hw fences that are already signaled should not require to be signaled again, otherwise waiting clients can receive the signal from unexpected hw fences that have been already signaled long time back. Add check to only signal hw fences that are not in signaled state during the client reset. Change-Id: I6f6a6ba142889f9c7ee2bd8680c30592c3c0987f Signed-off-by: Ingrid Gallardo --- hw_fence/src/hw_fence_drv_priv.c | 33 ++++++++++++++++++++------------ 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index f5bc6f3198..8bb35aad1d 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1291,13 +1291,29 @@ int hw_fence_process_fence(struct hw_fence_driver_data *drv_data, return ret; } +static void _signal_all_wait_clients(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence *hw_fence, u64 hash, int error) +{ + enum hw_fence_client_id wait_client_id; + struct msm_hw_fence_client *hw_fence_wait_client; + + /* signal with an error all the waiting clients for this fence */ + for (wait_client_id = 0; wait_client_id < HW_FENCE_CLIENT_MAX; wait_client_id++) { + if (hw_fence->wait_client_mask & BIT(wait_client_id)) { + hw_fence_wait_client = drv_data->clients[wait_client_id]; + + if (hw_fence_wait_client) + _fence_ctl_signal(drv_data, hw_fence_wait_client, hw_fence, + hash, 0, error); + } + } +} + int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash, u32 reset_flags) { int ret = 0; - enum hw_fence_client_id wait_client_id; - struct msm_hw_fence_client *hw_fence_wait_client; int error = (reset_flags & MSM_HW_FENCE_RESET_WITHOUT_ERROR) ? 
0 : MSM_HW_FENCE_ERROR_RESET; GLOBAL_ATOMIC_STORE(&hw_fence->lock, 1); /* lock */ @@ -1314,16 +1330,9 @@ int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, if (hw_fence->fence_allocator == hw_fence_client->client_id) { - /* signal with an error all the waiting clients for this fence */ - for (wait_client_id = 0; wait_client_id < HW_FENCE_CLIENT_MAX; wait_client_id++) { - if (hw_fence->wait_client_mask & BIT(wait_client_id)) { - hw_fence_wait_client = drv_data->clients[wait_client_id]; - - if (hw_fence_wait_client) - _fence_ctl_signal(drv_data, hw_fence_wait_client, hw_fence, - hash, 0, error); - } - } + /* if fence is not signaled, signal with error all the waiting clients */ + if (!(hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL)) + _signal_all_wait_clients(drv_data, hw_fence, hash, error); if (reset_flags & MSM_HW_FENCE_RESET_WITHOUT_DESTROY) goto skip_destroy; From 51925b9f9642a825f9884d676833721ec73616f5 Mon Sep 17 00:00:00 2001 From: Amine Najahi Date: Mon, 29 Aug 2022 12:44:40 -0400 Subject: [PATCH 026/166] mm-drivers: configure max driver instances base on build config Add #ifdef to configure the maximum allowed driver instances base on the build configuration, to avoid uninitialized access to fences array. 
Change-Id: I83ea5ade33a93e23edee21a0435ed7257fe5c9c9 Signed-off-by: Amine Najahi --- sync_fence/src/qcom_sync_file.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/sync_fence/src/qcom_sync_file.c b/sync_fence/src/qcom_sync_file.c index f054f80e34..e292b368ec 100644 --- a/sync_fence/src/qcom_sync_file.c +++ b/sync_fence/src/qcom_sync_file.c @@ -30,6 +30,12 @@ #define FENCE_MIN 1 #define FENCE_MAX 32 +#if IS_ENABLED(CONFIG_DEBUG_FS) + #define MAX_DEVICE_SUPPORTED 2 +#else + #define MAX_DEVICE_SUPPORTED 1 +#endif + struct sync_device { /* device info */ struct class *dev_class; @@ -105,9 +111,9 @@ static void clear_fence_array_tracker(bool force_clear) static struct sync_device *spec_fence_init_locked(struct sync_device *obj, const char *name) { - if (atomic_read(&obj->device_available) > 1) { - pr_err("number of device fds are limited by 2, device opened:%d\n", - atomic_read(&obj->device_available)); + if (atomic_read(&obj->device_available) >= MAX_DEVICE_SUPPORTED) { + pr_err("number of device fds are limited to %d, device opened:%d\n", + MAX_DEVICE_SUPPORTED, atomic_read(&obj->device_available)); return NULL; } else if (!atomic_read(&obj->device_available)) { memset(obj->name, 0, NAME_LEN); From 877fea198c3d4173c7f2ae79c8a102de7617dcb1 Mon Sep 17 00:00:00 2001 From: Alex Danila Date: Thu, 6 Oct 2022 10:29:48 -0400 Subject: [PATCH 027/166] mm-drivers: hw_fence: add missing return type Change addresses a compiler error for missing return type Change-Id: I82f22cefef069988e60608210533250307e516b3 Signed-off-by: Alex Danila --- hw_fence/src/hw_fence_drv_priv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 8bb35aad1d..af935b0407 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -124,7 +124,7 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, return ret; } -static inline 
_lock_client_queue(int queue_type) +static inline bool _lock_client_queue(int queue_type) { /* Only lock Rx Queue */ return (queue_type == (HW_FENCE_RX_QUEUE - 1)) ? true : false; From b13dcfb79e442eb7f9cbb68ba5544efef9930bd5 Mon Sep 17 00:00:00 2001 From: Grace An Date: Mon, 12 Sep 2022 10:36:44 -0700 Subject: [PATCH 028/166] mm-drivers: hw_fence: remove client id mask registration logic Remove client id bitmask to track registered clients. This allows support of more than 64 transmit clients. Change-Id: Ia2b4667d008bfceb0b46bfd3e14302e5bec82cb3 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 7 +++---- hw_fence/src/hw_fence_drv_debug.c | 6 +++--- hw_fence/src/hw_fence_drv_priv.c | 5 ++--- hw_fence/src/msm_hw_fence.c | 27 +++++++++++---------------- 4 files changed, 19 insertions(+), 26 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 1efc41cc41..ca15fdb5a9 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -244,7 +244,7 @@ struct msm_hw_fence_dbg_data { * @ctl_start_ptr: pointer to the ctl_start registers of the display hw (platforms with no dpu-ipc) * @ctl_start_size: size of the ctl_start registers of the display hw (platforms with no dpu-ipc) * @client_id_mask: bitmask for tracking registered client_ids - * @clients_mask_lock: lock to synchronize access to the clients mask + * @clients_register_lock: lock to synchronize clients registration and deregistration * @msm_hw_fence_client: table with the handles of the registered clients * @vm_ready: flag to indicate if vm has been initialized * @ipcc_dpu_initialized: flag to indicate if dpu hw is initialized @@ -316,9 +316,8 @@ struct hw_fence_driver_data { void *ctl_start_ptr[HW_FENCE_MAX_DPU_LOOPBACK_CLIENTS]; uint32_t ctl_start_size[HW_FENCE_MAX_DPU_LOOPBACK_CLIENTS]; - /* bitmask for tracking registered client_ids */ - u64 client_id_mask; - struct mutex clients_mask_lock; + /* synchronize 
client_ids registration and deregistration */ + struct mutex clients_register_lock; /* table with registered client handles */ struct msm_hw_fence_client *clients[HW_FENCE_CLIENT_MAX]; diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index 28674dcca9..1844c2926c 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -895,10 +895,10 @@ int process_validation_client_loopback(struct hw_fence_driver_data *drv_data, return -EINVAL; } - mutex_lock(&drv_data->clients_mask_lock); + mutex_lock(&drv_data->clients_register_lock); if (!drv_data->clients[client_id]) { - mutex_unlock(&drv_data->clients_mask_lock); + mutex_unlock(&drv_data->clients_register_lock); return -EINVAL; } @@ -912,7 +912,7 @@ int process_validation_client_loopback(struct hw_fence_driver_data *drv_data, /* wake-up waiting client */ wake_up_all(&hw_fence_client->wait_queue); - mutex_unlock(&drv_data->clients_mask_lock); + mutex_unlock(&drv_data->clients_register_lock); return 0; } diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 8bb35aad1d..af6db58a91 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -624,10 +624,9 @@ void hw_fence_cleanup_client(struct hw_fence_driver_data *drv_data, * allocation, then we will need to notify FenceCTL about the client that is * going-away here. 
*/ - mutex_lock(&drv_data->clients_mask_lock); - drv_data->client_id_mask &= ~BIT(hw_fence_client->client_id); + mutex_lock(&drv_data->clients_register_lock); drv_data->clients[hw_fence_client->client_id] = NULL; - mutex_unlock(&drv_data->clients_mask_lock); + mutex_unlock(&drv_data->clients_register_lock); /* Deallocate client's object */ HWFNC_DBG_LUT("freeing client_id:%d\n", hw_fence_client->client_id); diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 0f693be07b..037b95e277 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -35,27 +35,24 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id, !mem_descriptor, client_id); return ERR_PTR(-EINVAL); } + /* Alloc client handle */ + hw_fence_client = kzalloc(sizeof(*hw_fence_client), GFP_KERNEL); + if (!hw_fence_client) + return ERR_PTR(-ENOMEM); /* Avoid race condition if multiple-threads request same client at same time */ - mutex_lock(&hw_fence_drv_data->clients_mask_lock); - if (hw_fence_drv_data->client_id_mask & BIT(client_id)) { + mutex_lock(&hw_fence_drv_data->clients_register_lock); + if (hw_fence_drv_data->clients[client_id]) { HWFNC_ERR("client with id %d already registered\n", client_id); - mutex_unlock(&hw_fence_drv_data->clients_mask_lock); + mutex_unlock(&hw_fence_drv_data->clients_register_lock); + kfree(hw_fence_client); return ERR_PTR(-EINVAL); } /* Mark client as registered */ - hw_fence_drv_data->client_id_mask |= BIT(client_id); - mutex_unlock(&hw_fence_drv_data->clients_mask_lock); + hw_fence_drv_data->clients[client_id] = hw_fence_client; + mutex_unlock(&hw_fence_drv_data->clients_register_lock); - /* Alloc client handle */ - hw_fence_client = kzalloc(sizeof(*hw_fence_client), GFP_KERNEL); - if (!hw_fence_client) { - mutex_lock(&hw_fence_drv_data->clients_mask_lock); - hw_fence_drv_data->client_id_mask &= ~BIT(client_id); - mutex_unlock(&hw_fence_drv_data->clients_mask_lock); - return ERR_PTR(-ENOMEM); - } 
hw_fence_client->client_id = client_id; hw_fence_client->ipc_client_id = hw_fence_ipcc_get_client_id(hw_fence_drv_data, client_id); @@ -74,8 +71,6 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id, hw_fence_client->update_rxq = hw_fence_ipcc_needs_rxq_update(hw_fence_drv_data, client_id); - hw_fence_drv_data->clients[client_id] = hw_fence_client; - /* Alloc Client HFI Headers and Queues */ ret = hw_fence_alloc_client_resources(hw_fence_drv_data, hw_fence_client, mem_descriptor); @@ -401,7 +396,7 @@ static int msm_hw_fence_probe_init(struct platform_device *pdev) if (rc) goto error; - mutex_init(&hw_fence_drv_data->clients_mask_lock); + mutex_init(&hw_fence_drv_data->clients_register_lock); /* set ready ealue so clients can register */ hw_fence_drv_data->resources_ready = true; From d62205ae1c563a951522e48810ed28d4d1106110 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Wed, 3 Aug 2022 11:22:21 -0700 Subject: [PATCH 029/166] mm-drivers: hw_fence: read qtimer for timestamps Move timestamps to use qtimer instead of sleep timer. 
Change-Id: I1a5f20c3d1ec31ba13e95713828024a309a53ba1 Signed-off-by: Ingrid Gallardo --- hw_fence/src/hw_fence_drv_priv.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 8bb35aad1d..63ebae2f2a 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -17,7 +17,11 @@ inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data) { +#ifdef HWFENCE_USE_SLEEP_TIMER return readl_relaxed(drv_data->qtime_io_mem); +#else /* USE QTIMER */ + return arch_timer_read_counter(); +#endif /* HWFENCE_USE_SLEEP_TIMER */ } static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, From a5e17f3fa7243790f8581ac6fdbdd7344460ec6e Mon Sep 17 00:00:00 2001 From: Bruce Hoo Date: Fri, 2 Sep 2022 17:24:27 +0800 Subject: [PATCH 030/166] mm-drivers: spec_fence: create dummy spec_fence for fence array creation Create dummy spec_fence and pass it to dma_fence_array_create(), to avoid NULL pointer access in dma_fence_array_create(). 
Change-Id: I7a283753169cccbed6c842090a48cbb6e185cf9a Signed-off-by: Bruce Hoo --- sync_fence/src/qcom_sync_file.c | 116 +++++++++++++++++++++++++------- 1 file changed, 90 insertions(+), 26 deletions(-) diff --git a/sync_fence/src/qcom_sync_file.c b/sync_fence/src/qcom_sync_file.c index e292b368ec..04d8951233 100644 --- a/sync_fence/src/qcom_sync_file.c +++ b/sync_fence/src/qcom_sync_file.c @@ -36,6 +36,14 @@ #define MAX_DEVICE_SUPPORTED 1 #endif +#define DUMMY_CONTEXT 0xfafadadafafadada +#define DUMMY_SEQNO 0xefa9ce00efa9ce00 + +struct dummy_spec_fence { + struct dma_fence fence; + spinlock_t lock; +}; + struct sync_device { /* device info */ struct class *dev_class; @@ -43,6 +51,7 @@ struct sync_device { struct device *dev; struct cdev *cdev; struct mutex lock; + struct dummy_spec_fence *dummy_fence; /* device drv data */ atomic_t device_available; @@ -61,6 +70,16 @@ struct fence_array_node { /* Speculative Sync Device Driver State */ static struct sync_device sync_dev; +static const char *spec_fence_get_name_dummy(struct dma_fence *fence) +{ + return "dummy_fence"; +} + +static const struct dma_fence_ops dummy_spec_fence_ops = { + .get_driver_name = spec_fence_get_name_dummy, + .get_timeline_name = spec_fence_get_name_dummy, +}; + static bool sanitize_fence_array(struct dma_fence_array *fence) { struct fence_array_node *node; @@ -193,8 +212,10 @@ static int spec_sync_create_array(struct fence_create_data *f) struct sync_file *sync_file; struct dma_fence_array *fence_array; struct fence_array_node *node; + struct dma_fence **fences; + struct dummy_spec_fence *dummy_fence_p = sync_dev.dummy_fence; bool signal_any; - int ret = 0; + int i, ret = 0; if (fd < 0) { pr_err("failed to get_unused_fd_flags\n"); @@ -207,10 +228,39 @@ static int spec_sync_create_array(struct fence_create_data *f) goto error_args; } + fences = kmalloc_array(f->num_fences, sizeof(void *), GFP_KERNEL|__GFP_ZERO); + if (!fences) { + ret = -ENOMEM; + goto error_args; + } + + for (i = 0; i < 
f->num_fences; i++) { + fences[i] = &dummy_fence_p->fence; + /* + * Increase dummy-fences refcount here, we must do this since any call to + * fence-array release while dummy-fences are the children of the fence-array + * will decrement the dummy_fence refcount. Therefore, to prevent the release + * of the dummy_fence fences, we must keep an extra refcount for every time that + * the fence-array->release can decrement its children's refcount. the extra + * refcount will be decreased impilictly when dma_fence_put(&fence_array->base) + * called. + */ + dma_fence_get(&dummy_fence_p->fence); + } + signal_any = f->flags & SPEC_FENCE_SIGNAL_ALL ? false : true; - fence_array = dma_fence_array_create(f->num_fences, NULL, + fence_array = dma_fence_array_create(f->num_fences, fences, dma_fence_context_alloc(1), 0, signal_any); + if (!fence_array) { + /* fence-array create failed, remove extra refcounts */ + for (i = 0; i < f->num_fences; i++) + dma_fence_put(&dummy_fence_p->fence); + + kfree(fences); + ret = -EINVAL; + goto error_args; + } /* Set the enable signal such that signalling is not done during wait*/ set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence_array->base.flags); @@ -299,9 +349,8 @@ static int spec_sync_bind_array(struct fence_bind_data *sync_bind_info) struct dma_fence_array *fence_array; struct dma_fence *fence = NULL; struct dma_fence *user_fence = NULL; - struct dma_fence **fence_list; int *user_fds, ret = 0, i; - u32 num_fences, counter; + u32 num_fences; fence = sync_file_get_fence(sync_bind_info->out_bind_fd); if (!fence) { @@ -309,6 +358,13 @@ static int spec_sync_bind_array(struct fence_bind_data *sync_bind_info) return -EINVAL; } + if (dma_fence_is_signaled(fence)) { + pr_err("spec fence is already signaled, out_fd:%d\n", + sync_bind_info->out_bind_fd); + ret = -EINVAL; + goto end; + } + fence_array = container_of(fence, struct dma_fence_array, base); if (!sanitize_fence_array(fence_array)) { pr_err("spec fence not found in the registered list 
out_fd:%d\n", @@ -317,14 +373,18 @@ static int spec_sync_bind_array(struct fence_bind_data *sync_bind_info) goto end; } - if (fence_array->fences) { - pr_err("fence array already populated, spec fd:%d status:%d flags:0x%x\n", - sync_bind_info->out_bind_fd, dma_fence_get_status(fence), fence->flags); - goto end; - } - num_fences = fence_array->num_fences; - counter = num_fences; + + for (i = 0; i < num_fences; i++) { + if (!(fence_array->fences[i]->context == DUMMY_CONTEXT && + fence_array->fences[i]->seqno == DUMMY_SEQNO)) { + pr_err("fence array already populated, spec fd:%d status:%d flags:0x%x\n", + sync_bind_info->out_bind_fd, dma_fence_get_status(fence), + fence->flags); + ret = -EINVAL; + goto end; + } + } user_fds = kzalloc(num_fences * (sizeof(int)), GFP_KERNEL); if (!user_fds) { @@ -332,31 +392,28 @@ static int spec_sync_bind_array(struct fence_bind_data *sync_bind_info) goto end; } - fence_list = kmalloc_array(num_fences, sizeof(void *), GFP_KERNEL|__GFP_ZERO); - if (!fence_list) { - ret = -ENOMEM; - goto out; - } - if (copy_from_user(user_fds, (void __user *)sync_bind_info->fds, num_fences * sizeof(int))) { - kfree(fence_list); ret = -EFAULT; goto out; } spin_lock(fence->lock); - fence_array->fences = fence_list; for (i = 0; i < num_fences; i++) { user_fence = sync_file_get_fence(user_fds[i]); if (!user_fence) { pr_warn("bind fences are invalid !! user_fd:%d out_bind_fd:%d\n", user_fds[i], sync_bind_info->out_bind_fd); - counter = i; ret = -EINVAL; goto bind_invalid; } fence_array->fences[i] = user_fence; + /* + * At this point the fence-array fully contains valid fences and no more the + * dummy-fence, therefore, we must release the extra refcount that the + * creation of the speculative fence added to the dummy-fence. 
+ */ + dma_fence_put(&sync_dev.dummy_fence->fence); pr_debug("spec fd:%d i:%d bind fd:%d error:%d\n", sync_bind_info->out_bind_fd, i, user_fds[i], fence_array->fences[i]->error); } @@ -372,12 +429,6 @@ bind_invalid: wake_up_all(&sync_dev.wait_queue); if (ret) { - for (i = counter - 1; i >= 0; i--) - dma_fence_put(fence_array->fences[i]); - - kfree(fence_list); - fence_array->fences = NULL; - fence_array->num_fences = 0; dma_fence_set_error(fence, -EINVAL); spin_unlock(fence->lock); dma_fence_signal(fence); @@ -437,6 +488,7 @@ const struct file_operations spec_sync_fops = { static int spec_sync_register_device(void) { + struct dummy_spec_fence *dummy_fence_p = NULL; int ret; sync_dev.dev_class = class_create(THIS_MODULE, CLASS_NAME); @@ -479,6 +531,17 @@ static int spec_sync_register_device(void) INIT_LIST_HEAD(&sync_dev.fence_array_list); init_waitqueue_head(&sync_dev.wait_queue); + dummy_fence_p = kzalloc(sizeof(struct dummy_spec_fence), GFP_KERNEL); + if (!dummy_fence_p) { + ret = -ENOMEM; + goto cdev_add_err; + } + + spin_lock_init(&dummy_fence_p->lock); + dma_fence_init(&dummy_fence_p->fence, &dummy_spec_fence_ops, &dummy_fence_p->lock, + DUMMY_CONTEXT, DUMMY_SEQNO); + sync_dev.dummy_fence = dummy_fence_p; + return 0; cdev_add_err: @@ -511,6 +574,7 @@ static void __exit spec_sync_deinit(void) device_destroy(sync_dev.dev_class, sync_dev.dev_num); unregister_chrdev_region(sync_dev.dev_num, 1); class_destroy(sync_dev.dev_class); + dma_fence_put(&sync_dev.dummy_fence->fence); } module_init(spec_sync_init); From d881744a7249c76a677dc28f68aec870e1836ca0 Mon Sep 17 00:00:00 2001 From: Nilaan Gunabalachandran Date: Tue, 8 Nov 2022 13:37:36 -0500 Subject: [PATCH 031/166] mm-drivers: fix printk argument errors This change fixes printk arguments in mm-drivers which are found with additional compilation flags and adds compile flags too. 
Change-Id: Ic83f044467dca6d391221182096b9c50b7da36de Signed-off-by: Nilaan Gunabalachandran --- hw_fence/Kbuild | 2 ++ hw_fence/src/hw_fence_drv_debug.c | 3 ++- hw_fence/src/hw_fence_drv_priv.c | 2 +- msm_ext_display/Kbuild | 2 ++ sync_fence/Kbuild | 2 ++ 5 files changed, 9 insertions(+), 2 deletions(-) diff --git a/hw_fence/Kbuild b/hw_fence/Kbuild index 8948d581e9..2cf74d291b 100644 --- a/hw_fence/Kbuild +++ b/hw_fence/Kbuild @@ -18,3 +18,5 @@ msm_hw_fence-$(CONFIG_DEBUG_FS) += src/hw_fence_ioctl.o CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" endif +EXTRA_CFLAGS += -Wformat-extra-args -Wstrict-prototypes -Wformat-insufficient-args \ + -Wformat-invalid-specifier -Wformat-zero-length -Wnonnull \ No newline at end of file diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index 1844c2926c..b159c9cee4 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -500,7 +500,8 @@ static int dump_single_entry(struct hw_fence_driver_data *drv_data, char *buf, u hw_fence = msm_hw_fence_find(drv_data, NULL, context, seqno, &hash); if (!hw_fence) { - HWFNC_ERR("no valid hfence found for context:%lu seqno:%lu", context, seqno, hash); + HWFNC_ERR("no valid hfence found for context:%lu seqno:%lu hash:%lu", + context, seqno, hash); len = scnprintf(buf + len, max_size - len, "no valid hfence found for context:%lu seqno:%lu hash:%lu\n", context, seqno, hash); diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 22a932c773..cb59f3d4d3 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -310,7 +310,7 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, /* calculate the index after the write */ to_write_idx = write_idx + payload_size_u32; - HWFNC_DBG_Q("to_write_idx:%d write_idx:%d payload_size\n", to_write_idx, write_idx, + HWFNC_DBG_Q("to_write_idx:%d write_idx:%d payload_size:%u\n", to_write_idx, write_idx, 
payload_size_u32); HWFNC_DBG_L("client_id:%d update %s hash:%llu ctx_id:%llu seqno:%llu flags:%llu error:%u\n", hw_fence_client->client_id, _get_queue_type(queue_type), diff --git a/msm_ext_display/Kbuild b/msm_ext_display/Kbuild index 284134c0af..a54149152b 100644 --- a/msm_ext_display/Kbuild +++ b/msm_ext_display/Kbuild @@ -8,3 +8,5 @@ obj-m += msm_ext_display.o msm_ext_display-y := src/msm_ext_display.o CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" +EXTRA_CFLAGS += -Wformat-extra-args -Wstrict-prototypes -Wformat-insufficient-args \ + -Wformat-invalid-specifier -Wformat-zero-length -Wnonnull \ No newline at end of file diff --git a/sync_fence/Kbuild b/sync_fence/Kbuild index fd631a4348..b1f9db20d7 100644 --- a/sync_fence/Kbuild +++ b/sync_fence/Kbuild @@ -12,3 +12,5 @@ sync_fence-y := src/qcom_sync_file.o CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" endif +EXTRA_CFLAGS += -Wformat-extra-args -Wstrict-prototypes -Wformat-insufficient-args \ + -Wformat-invalid-specifier -Wformat-zero-length -Wnonnull From 9ff114eee87b210728e52d0969adfcf9816c31e5 Mon Sep 17 00:00:00 2001 From: Harshdeep Dhatt Date: Wed, 28 Sep 2022 15:58:25 -0600 Subject: [PATCH 032/166] mm-drivers: hw_fence: Add per client ipc interrupt property Not all clients need ipc interrupt for an already signaled fence. Set the per client property based on whether a client needs the interrupt or not. Also, set update_rxq property for GPU client to false, as GPU doesn't need already signaled fences to be sent to GPU Rx Queue. 
Change-Id: I08a6bbd598695b112124ce6ec409db75d5e11e0f Signed-off-by: Harshdeep Dhatt --- hw_fence/include/hw_fence_drv_ipc.h | 10 ++++ hw_fence/include/hw_fence_drv_priv.h | 2 + hw_fence/src/hw_fence_drv_ipc.c | 70 ++++++++++++++++------------ hw_fence/src/hw_fence_drv_priv.c | 5 +- hw_fence/src/msm_hw_fence.c | 1 + 5 files changed, 56 insertions(+), 32 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_ipc.h b/hw_fence/include/hw_fence_drv_ipc.h index c24781ac36..8a3f922b36 100644 --- a/hw_fence/include/hw_fence_drv_ipc.h +++ b/hw_fence/include/hw_fence_drv_ipc.h @@ -87,4 +87,14 @@ int hw_fence_ipcc_get_signal_id(struct hw_fence_driver_data *drv_data, u32 clien */ bool hw_fence_ipcc_needs_rxq_update(struct hw_fence_driver_data *drv_data, int client_id); +/** + * hw_fence_ipcc_needs_ipc_irq() - Returns bool to indicate if client needs ipc interrupt for + * already signaled fences + * @drv_data: driver data. + * @client_id: hw fence driver client id. + * + * Return: true if client needs ipc interrupt for signaled fences, false otherwise + */ +bool hw_fence_ipcc_needs_ipc_irq(struct hw_fence_driver_data *drv_data, int client_id); + #endif /* __HW_FENCE_DRV_IPC_H */ diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index ca15fdb5a9..52bf413579 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -150,6 +150,7 @@ enum payload_type { * @ipc_signal_id: id of the signal to be triggered for this client * @ipc_client_id: id of the ipc client for this hw fence driver client * @update_rxq: bool to indicate if client uses rx-queue + * @send_ipc: bool to indicate if client requires ipc interrupt for already signaled fences * @wait_queue: wait queue for the validation clients * @val_signal: doorbell flag to signal the validation clients in the wait queue */ @@ -160,6 +161,7 @@ struct msm_hw_fence_client { int ipc_signal_id; int ipc_client_id; bool update_rxq; + bool send_ipc; #if 
IS_ENABLED(CONFIG_DEBUG_FS) wait_queue_head_t wait_queue; atomic_t val_signal; diff --git a/hw_fence/src/hw_fence_drv_ipc.c b/hw_fence/src/hw_fence_drv_ipc.c index c3414a20da..a36163db88 100644 --- a/hw_fence/src/hw_fence_drv_ipc.c +++ b/hw_fence/src/hw_fence_drv_ipc.c @@ -13,11 +13,13 @@ * @ipc_client_id: ipc client id for the hw-fence client. * @ipc_signal_id: ipc signal id for the hw-fence client. * @update_rxq: bool to indicate if clinet uses rx-queue. + * @send_ipc: bool to indicate if client requires ipc interrupt for signaled fences */ struct hw_fence_client_ipc_map { int ipc_client_id; int ipc_signal_id; bool update_rxq; + bool send_ipc; }; /** @@ -32,22 +34,22 @@ struct hw_fence_client_ipc_map { * {HW_FENCE_IPC_CLIENT_ID_APPS, 20}. */ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_no_dpu[HW_FENCE_CLIENT_MAX] = { - {HW_FENCE_IPC_CLIENT_ID_APPS, 1, true}, /* ctrl queue loopback */ - {HW_FENCE_IPC_CLIENT_ID_GPU, 0, true}, /* ctx0 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 14, false}, /* ctl0 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 15, false}, /* ctl1 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 16, false}, /* ctl2 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 17, false}, /* ctl3 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 18, false}, /* ctl4 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 19, false}, /* ctl5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 1, true, true}, /* ctrl queue loopback */ + {HW_FENCE_IPC_CLIENT_ID_GPU, 0, false, false}, /* ctx0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 14, false, true}, /* ctl0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 15, false, true}, /* ctl1 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 16, false, true}, /* ctl2 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 17, false, true}, /* ctl3 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 18, false, true}, /* ctl4 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 19, false, true}, /* ctl5 */ #if IS_ENABLED(CONFIG_DEBUG_FS) - {HW_FENCE_IPC_CLIENT_ID_APPS, 21, true}, /* val0 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 22, true}, /* val1 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 23, true}, /* val2 */ - 
{HW_FENCE_IPC_CLIENT_ID_APPS, 24, true}, /* val3 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 25, true}, /* val4 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 26, true}, /* val5 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 27, true}, /* val6 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 21, true, true}, /* val0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 22, true, true}, /* val1 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 23, true, true}, /* val2 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 24, true, true}, /* val3 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 25, true, true}, /* val4 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 26, true, true}, /* val5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 27, true, true}, /* val6 */ #endif /* CONFIG_DEBUG_FS */ }; @@ -60,22 +62,22 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_no_dpu[HW_FENCE_CLIENT_M * Note that the index of this struct must match the enum hw_fence_client_id */ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_CLIENT_MAX] = { - {HW_FENCE_IPC_CLIENT_ID_APPS, 1, true}, /* ctrl queue loopback */ - {HW_FENCE_IPC_CLIENT_ID_GPU, 0, true}, /* ctx0 */ - {HW_FENCE_IPC_CLIENT_ID_DPU, 0, false}, /* ctl0 */ - {HW_FENCE_IPC_CLIENT_ID_DPU, 1, false}, /* ctl1 */ - {HW_FENCE_IPC_CLIENT_ID_DPU, 2, false}, /* ctl2 */ - {HW_FENCE_IPC_CLIENT_ID_DPU, 3, false}, /* ctl3 */ - {HW_FENCE_IPC_CLIENT_ID_DPU, 4, false}, /* ctl4 */ - {HW_FENCE_IPC_CLIENT_ID_DPU, 5, false}, /* ctl5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 1, true, true}, /* ctrl queue loopback */ + {HW_FENCE_IPC_CLIENT_ID_GPU, 0, false, false}, /* ctx0 */ + {HW_FENCE_IPC_CLIENT_ID_DPU, 0, false, true}, /* ctl0 */ + {HW_FENCE_IPC_CLIENT_ID_DPU, 1, false, true}, /* ctl1 */ + {HW_FENCE_IPC_CLIENT_ID_DPU, 2, false, true}, /* ctl2 */ + {HW_FENCE_IPC_CLIENT_ID_DPU, 3, false, true}, /* ctl3 */ + {HW_FENCE_IPC_CLIENT_ID_DPU, 4, false, true}, /* ctl4 */ + {HW_FENCE_IPC_CLIENT_ID_DPU, 5, false, true}, /* ctl5 */ #if IS_ENABLED(CONFIG_DEBUG_FS) - {HW_FENCE_IPC_CLIENT_ID_APPS, 21, true}, /* val0 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 22, true}, /* val1 */ - 
{HW_FENCE_IPC_CLIENT_ID_APPS, 23, true}, /* val2 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 24, true}, /* val3 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 25, true}, /* val4 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 26, true}, /* val5 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 27, true}, /* val6 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 21, true, true}, /* val0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 22, true, true}, /* val1 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 23, true, true}, /* val2 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 24, true, true}, /* val3 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 25, true, true}, /* val4 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 26, true, true}, /* val5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 27, true, true}, /* val6 */ #endif /* CONFIG_DEBUG_FS */ }; @@ -103,6 +105,14 @@ bool hw_fence_ipcc_needs_rxq_update(struct hw_fence_driver_data *drv_data, int c return drv_data->ipc_clients_table[client_id].update_rxq; } +bool hw_fence_ipcc_needs_ipc_irq(struct hw_fence_driver_data *drv_data, int client_id) +{ + if (!drv_data || client_id >= HW_FENCE_CLIENT_MAX) + return -EINVAL; + + return drv_data->ipc_clients_table[client_id].send_ipc; +} + /** * _get_ipc_client_name() - Returns ipc client name, used for debugging. 
*/ diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index cb59f3d4d3..593b365e26 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1058,8 +1058,9 @@ static void _fence_ctl_signal(struct hw_fence_driver_data *drv_data, hw_fence->seq_id, hash, flags, error, HW_FENCE_RX_QUEUE - 1); /* Signal the hw fence now */ - hw_fence_ipcc_trigger_signal(drv_data, tx_client_id, rx_client_id, - hw_fence_client->ipc_signal_id); + if (hw_fence_client->send_ipc) + hw_fence_ipcc_trigger_signal(drv_data, tx_client_id, rx_client_id, + hw_fence_client->ipc_signal_id); } static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data, diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 037b95e277..8eb520527f 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -70,6 +70,7 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id, } hw_fence_client->update_rxq = hw_fence_ipcc_needs_rxq_update(hw_fence_drv_data, client_id); + hw_fence_client->send_ipc = hw_fence_ipcc_needs_ipc_irq(hw_fence_drv_data, client_id); /* Alloc Client HFI Headers and Queues */ ret = hw_fence_alloc_client_resources(hw_fence_drv_data, From fbea8f77fa5a0cf9e2fc60a781cf0e856055a65c Mon Sep 17 00:00:00 2001 From: Harshdeep Dhatt Date: Mon, 19 Sep 2022 12:54:55 -0600 Subject: [PATCH 033/166] mm-drivers: hw_fence: Set MSM_HW_FENCE_FLAG_SIGNALED_BIT flag Set this flag if a hw fence (for which a client wants to wait) has already been signaled. Clients can check this flag and indicate to their respective hardware (or firmware) that this fence is already signaled. 
Change-Id: I9337cabb771197f2d35ac4386402a25941d73311 Signed-off-by: Harshdeep Dhatt --- hw_fence/include/hw_fence_drv_priv.h | 3 ++- hw_fence/src/hw_fence_drv_debug.c | 3 ++- hw_fence/src/hw_fence_drv_priv.c | 11 ++++++++--- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 52bf413579..de978eb316 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -430,7 +430,8 @@ inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data); int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence_queue_payload *payload, int queue_type); int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, - struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno); + struct dma_fence *fence, struct msm_hw_fence_client *hw_fence_client, u64 context, + u64 seqno); struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno, u64 *hash); diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index b159c9cee4..2dd3ae4ec6 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -340,7 +340,8 @@ static ssize_t hw_fence_dbg_tx_and_signal_clients_wr(struct file *file, /***** DST CLIENT - REGISTER WAIT CLIENT ******/ /**********************************************/ /* use same context and seqno that src client used to create fence */ - ret = hw_fence_register_wait_client(drv_data, hw_fence_client_dst, context, seqno); + ret = hw_fence_register_wait_client(drv_data, NULL, hw_fence_client_dst, context, + seqno); if (ret) { HWFNC_ERR("failed to register for wait\n"); return -EINVAL; diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 593b365e26..efa0145d85 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ 
b/hw_fence/src/hw_fence_drv_priv.c @@ -1220,6 +1220,7 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, /* signal the join hw fence */ _fence_ctl_signal(drv_data, hw_fence_client, join_fence, hash_join_fence, 0, 0); + set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &array->base.flags); /* * job of the join-fence is finished since we already signaled, @@ -1240,7 +1241,8 @@ error_array: } int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, - struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno) + struct dma_fence *fence, struct msm_hw_fence_client *hw_fence_client, u64 context, + u64 seqno) { struct msm_hw_fence *hw_fence; u64 hash; @@ -1263,8 +1265,11 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, wmb(); /* if hw fence already signaled, signal the client */ - if (hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL) + if (hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL) { + if (fence != NULL) + set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &fence->flags); _fence_ctl_signal(drv_data, hw_fence_client, hw_fence, hash, 0, 0); + } GLOBAL_ATOMIC_STORE(&hw_fence->lock, 0); /* unlock */ @@ -1287,7 +1292,7 @@ int hw_fence_process_fence(struct hw_fence_driver_data *drv_data, return -EINVAL; } - ret = hw_fence_register_wait_client(drv_data, hw_fence_client, fence->context, + ret = hw_fence_register_wait_client(drv_data, fence, hw_fence_client, fence->context, fence->seqno); if (ret) HWFNC_ERR("Error registering for wait client:%d\n", hw_fence_client->client_id); From 54256aa9e045237c79dceb2966e2e3b86aa21075 Mon Sep 17 00:00:00 2001 From: Harshdeep Dhatt Date: Wed, 21 Sep 2022 15:28:53 -0600 Subject: [PATCH 034/166] mm-drivers: hw_fence: Fix join fence signaling It is possible that one (or more) child fences get signaled by fence controller, right after we add the join fence as parent fence of the child fence. If so, the join fence pending child count may become 0 which means we can safely signal the join fence. 
Change-Id: I0222b93a62db13eeb7867f3741c1db944df036b1 Signed-off-by: Harshdeep Dhatt --- hw_fence/src/hw_fence_drv_priv.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index efa0145d85..f820a824e5 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1124,7 +1124,7 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *join_fence; struct msm_hw_fence *hw_fence_child; struct dma_fence *child_fence; - u32 signaled_fences = 0; + bool signal_join_fence = false; u64 hash_join_fence, hash; int i, ret = 0; @@ -1178,13 +1178,13 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, /* child fence is already signaled */ GLOBAL_ATOMIC_STORE(&join_fence->lock, 1); /* lock */ - join_fence->pending_child_cnt--; + if (--join_fence->pending_child_cnt == 0) + signal_join_fence = true; /* update memory for the table update */ wmb(); GLOBAL_ATOMIC_STORE(&join_fence->lock, 0); /* unlock */ - signaled_fences++; } else { /* child fence is not signaled */ @@ -1216,7 +1216,7 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, } /* all fences were signaled, signal client now */ - if (signaled_fences == array->num_fences) { + if (signal_join_fence) { /* signal the join hw fence */ _fence_ctl_signal(drv_data, hw_fence_client, join_fence, hash_join_fence, 0, 0); From ecef24aa6215f786549243d02f5a470a8b2d0c8a Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Fri, 16 Sep 2022 14:59:56 -0700 Subject: [PATCH 035/166] mm-drivers: hw_fence: update ipc regs config to support phys-id Starting pineapple, each ipc client has a different physical-id and virtual-id for registers access and configuration. This change updates the ipc to handle this different configuration. 
Change-Id: I36fa84b07ffd209ce3fb323ff796f9e7721d7dd2 Signed-off-by: Ingrid Gallardo --- hw_fence/include/hw_fence_drv_debug.h | 33 ++-- hw_fence/include/hw_fence_drv_ipc.h | 37 +++- hw_fence/include/hw_fence_drv_priv.h | 12 +- hw_fence/src/hw_fence_drv_debug.c | 20 ++- hw_fence/src/hw_fence_drv_ipc.c | 235 ++++++++++++++++++-------- hw_fence/src/hw_fence_drv_priv.c | 4 +- hw_fence/src/hw_fence_ioctl.c | 11 +- hw_fence/src/msm_hw_fence.c | 21 ++- 8 files changed, 252 insertions(+), 121 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_debug.h b/hw_fence/include/hw_fence_drv_debug.h index de0e6e7a37..4f22b94664 100644 --- a/hw_fence/include/hw_fence_drv_debug.h +++ b/hw_fence/include/hw_fence_drv_debug.h @@ -69,7 +69,8 @@ int process_validation_client_loopback(struct hw_fence_driver_data *drv_data, in extern const struct file_operations hw_sync_debugfs_fops; struct hw_fence_out_clients_map { - int ipc_client_id; /* ipc client id for the hw fence client */ + int ipc_client_id_vid; /* ipc client virtual id for the hw fence client */ + int ipc_client_id_pid; /* ipc client physical id for the hw fence client */ int ipc_signal_id; /* ipc signal id for the hw fence client */ }; @@ -81,21 +82,21 @@ struct hw_fence_out_clients_map { */ static const struct hw_fence_out_clients_map dbg_out_clients_signal_map_no_dpu[HW_FENCE_CLIENT_MAX] = { - {HW_FENCE_IPC_CLIENT_ID_APPS, 0}, /* CTRL_LOOPBACK */ - {HW_FENCE_IPC_CLIENT_ID_GPU, 0}, /* CTX0 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 2}, /* CTL0 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 4}, /* CTL1 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 6}, /* CTL2 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 8}, /* CTL3 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 10}, /* CTL4 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 12}, /* CTL5 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 21}, /* VAL0 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 22}, /* VAL1 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 23}, /* VAL2 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 24}, /* VAL3 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 25}, /* VAL4 */ - 
{HW_FENCE_IPC_CLIENT_ID_APPS, 26}, /* VAL5 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 27}, /* VAL6 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 0}, /* CTRL_LOOPBACK */ + {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0}, /* CTX0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 2}, /* CTL0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 4}, /* CTL1 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 6}, /* CTL2 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 8}, /* CTL3 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 10}, /* CTL4 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 12}, /* CTL5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 21}, /* VAL0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 22}, /* VAL1 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 23}, /* VAL2 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 24}, /* VAL3 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25}, /* VAL4 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26}, /* VAL5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27}, /* VAL6 */ }; /** diff --git a/hw_fence/include/hw_fence_drv_ipc.h b/hw_fence/include/hw_fence_drv_ipc.h index 8a3f922b36..e905ea8ed6 100644 --- a/hw_fence/include/hw_fence_drv_ipc.h +++ b/hw_fence/include/hw_fence_drv_ipc.h @@ -6,17 +6,26 @@ #ifndef __HW_FENCE_DRV_IPC_H #define __HW_FENCE_DRV_IPC_H -#define HW_FENCE_IPC_CLIENT_ID_APPS 8 -#define HW_FENCE_IPC_CLIENT_ID_GPU 9 -#define HW_FENCE_IPC_CLIENT_ID_DPU 25 +/* ipc clients virtual client-id */ +#define HW_FENCE_IPC_CLIENT_ID_APPS_VID 8 +#define HW_FENCE_IPC_CLIENT_ID_GPU_VID 9 +#define HW_FENCE_IPC_CLIENT_ID_DPU_VID 25 + +/* ipc clients physical client-id */ +#define 
HW_FENCE_IPC_CLIENT_ID_APPS_PID 3 +#define HW_FENCE_IPC_CLIENT_ID_GPU_PID 4 +#define HW_FENCE_IPC_CLIENT_ID_DPU_PID 9 #define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_LAHAINA 2 #define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_WAIPIO 1 -#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_KAILUA 2 +#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_KALAMA 2 +#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_PINEAPPLE 2 +#define HW_FENCE_IPC_FENCE_PROTOCOL_ID_PINEAPPLE 4 #define HW_FENCE_IPCC_HW_REV_100 0x00010000 /* Lahaina */ #define HW_FENCE_IPCC_HW_REV_110 0x00010100 /* Waipio */ -#define HW_FENCE_IPCC_HW_REV_170 0x00010700 /* Kailua */ +#define HW_FENCE_IPCC_HW_REV_170 0x00010700 /* Kalama */ +#define HW_FENCE_IPCC_HW_REV_203 0x00020003 /* Pineapple */ #define IPC_PROTOCOLp_CLIENTc_VERSION(base, p, c) (base + (0x40000*p) + (0x1000*c)) #define IPC_PROTOCOLp_CLIENTc_CONFIG(base, p, c) (base + 0x8 + (0x40000*p) + (0x1000*c)) @@ -55,8 +64,8 @@ int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data); #endif /* HW_DPU_IPCC */ /** - * hw_fence_ipcc_get_client_id() - Returns the ipc client id that corresponds to the hw fence - * driver client. + * hw_fence_ipcc_get_client_virt_id() - Returns the ipc client virtual id that corresponds to the + * hw fence driver client. * @drv_data: driver data. * @client_id: hw fence driver client id. * @@ -64,7 +73,19 @@ int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data); * * Return: client_id on success or negative errno (-EINVAL) */ -int hw_fence_ipcc_get_client_id(struct hw_fence_driver_data *drv_data, u32 client_id); +int hw_fence_ipcc_get_client_virt_id(struct hw_fence_driver_data *drv_data, u32 client_id); + +/** + * hw_fence_ipcc_get_client_phys_id() - Returns the ipc client physical id that corresponds to the + * hw fence driver client. + * @drv_data: driver data. + * @client_id: hw fence driver client id. + * + * The ipc client id returned by this API is used by the hw fence driver when signaling the fence. 
+ * + * Return: client_id on success or negative errno (-EINVAL) + */ +int hw_fence_ipcc_get_client_phys_id(struct hw_fence_driver_data *drv_data, u32 client_id); /** * hw_fence_ipcc_get_signal_id() - Returns the ipc signal id that corresponds to the hw fence diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index de978eb316..14a302871d 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -148,7 +148,8 @@ enum payload_type { * @mem_descriptor: hfi header memory descriptor * @queues: queues descriptor * @ipc_signal_id: id of the signal to be triggered for this client - * @ipc_client_id: id of the ipc client for this hw fence driver client + * @ipc_client_vid: virtual id of the ipc client for this hw fence driver client + * @ipc_client_pid: physical id of the ipc client for this hw fence driver client * @update_rxq: bool to indicate if client uses rx-queue * @send_ipc: bool to indicate if client requires ipc interrupt for already signaled fences * @wait_queue: wait queue for the validation clients @@ -159,7 +160,8 @@ struct msm_hw_fence_client { struct msm_hw_fence_mem_addr mem_descriptor; struct msm_hw_fence_queue queues[HW_FENCE_CLIENT_QUEUES]; int ipc_signal_id; - int ipc_client_id; + int ipc_client_vid; + int ipc_client_pid; bool update_rxq; bool send_ipc; #if IS_ENABLED(CONFIG_DEBUG_FS) @@ -238,7 +240,8 @@ struct msm_hw_fence_dbg_data { * @ipcc_io_mem: base for the ipcc io mem map * @ipcc_size: size of the ipcc io mem mapping * @protocol_id: ipcc protocol id used by this driver - * @ipcc_client_id: ipcc client id for this driver + * @ipcc_client_vid: ipcc client virtual-id for this driver + * @ipcc_client_pid: ipcc client physical-id for this driver * @ipc_clients_table: table with the ipcc mapping for each client of this driver * @qtime_reg_base: qtimer register base address * @qtime_io_mem: qtimer io mem map @@ -304,7 +307,8 @@ struct hw_fence_driver_data { void __iomem 
*ipcc_io_mem; uint32_t ipcc_size; u32 protocol_id; - u32 ipcc_client_id; + u32 ipcc_client_vid; + u32 ipcc_client_pid; /* table with mapping of ipc client for each hw-fence client */ struct hw_fence_client_ipc_map *ipc_clients_table; diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index 2dd3ae4ec6..a1db824278 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -106,8 +106,10 @@ static int _debugfs_ipcc_trigger(struct file *file, const char __user *user_buf, static ssize_t hw_fence_dbg_ipcc_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { - return _debugfs_ipcc_trigger(file, user_buf, count, ppos, HW_FENCE_IPC_CLIENT_ID_APPS, - HW_FENCE_IPC_CLIENT_ID_APPS); + struct hw_fence_driver_data *drv_data = file->private_data; + + return _debugfs_ipcc_trigger(file, user_buf, count, ppos, drv_data->ipcc_client_pid, + drv_data->ipcc_client_vid); } #ifdef HW_DPU_IPCC @@ -124,8 +126,10 @@ static ssize_t hw_fence_dbg_ipcc_write(struct file *file, const char __user *use static ssize_t hw_fence_dbg_ipcc_dpu_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { - return _debugfs_ipcc_trigger(file, user_buf, count, ppos, HW_FENCE_IPC_CLIENT_ID_APPS, - HW_FENCE_IPC_CLIENT_ID_DPU); + struct hw_fence_driver_data *drv_data = file->private_data; + + return _debugfs_ipcc_trigger(file, user_buf, count, ppos, drv_data->ipcc_client_pid, + hw_fence_ipcc_get_client_virt_id(drv_data, HW_FENCE_CLIENT_ID_CTL0)); } @@ -361,8 +365,8 @@ static ssize_t hw_fence_dbg_tx_and_signal_clients_wr(struct file *file, return -EINVAL; /* Write to ipcc to trigger the irq */ - tx_client = HW_FENCE_IPC_CLIENT_ID_APPS; - rx_client = HW_FENCE_IPC_CLIENT_ID_APPS; + tx_client = drv_data->ipcc_client_pid; + rx_client = drv_data->ipcc_client_vid; HWFNC_DBG_IRQ("client:%d tx_client:%d rx_client:%d signal:%d delay:%d in_data%d\n", client_id_src, tx_client, rx_client, signal_id, 
drv_data->debugfs_data.hw_fence_sim_release_delay, input_data); @@ -866,8 +870,8 @@ static ssize_t hw_fence_dbg_create_join_fence(struct file *file, } /* write to ipcc to trigger the irq */ - tx_client = HW_FENCE_IPC_CLIENT_ID_APPS; - rx_client = HW_FENCE_IPC_CLIENT_ID_APPS; + tx_client = drv_data->ipcc_client_pid; + rx_client = drv_data->ipcc_client_vid; hw_fence_ipcc_trigger_signal(drv_data, tx_client, rx_client, signal_id); usleep_range(drv_data->debugfs_data.hw_fence_sim_release_delay, diff --git a/hw_fence/src/hw_fence_drv_ipc.c b/hw_fence/src/hw_fence_drv_ipc.c index a36163db88..a3cccfbf31 100644 --- a/hw_fence/src/hw_fence_drv_ipc.c +++ b/hw_fence/src/hw_fence_drv_ipc.c @@ -3,6 +3,7 @@ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. */ +#include #include "hw_fence_drv_priv.h" #include "hw_fence_drv_utils.h" #include "hw_fence_drv_ipc.h" @@ -10,13 +11,15 @@ /** * struct hw_fence_client_ipc_map - map client id with ipc signal for trigger. - * @ipc_client_id: ipc client id for the hw-fence client. + * @ipc_client_id_virt: virtual ipc client id for the hw-fence client. + * @ipc_client_id_phys: physical ipc client id for the hw-fence client. * @ipc_signal_id: ipc signal id for the hw-fence client. * @update_rxq: bool to indicate if clinet uses rx-queue. * @send_ipc: bool to indicate if client requires ipc interrupt for signaled fences */ struct hw_fence_client_ipc_map { - int ipc_client_id; + int ipc_client_id_virt; + int ipc_client_id_phys; int ipc_signal_id; bool update_rxq; bool send_ipc; @@ -34,22 +37,22 @@ struct hw_fence_client_ipc_map { * {HW_FENCE_IPC_CLIENT_ID_APPS, 20}. 
*/ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_no_dpu[HW_FENCE_CLIENT_MAX] = { - {HW_FENCE_IPC_CLIENT_ID_APPS, 1, true, true}, /* ctrl queue loopback */ - {HW_FENCE_IPC_CLIENT_ID_GPU, 0, false, false}, /* ctx0 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 14, false, true}, /* ctl0 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 15, false, true}, /* ctl1 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 16, false, true}, /* ctl2 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 17, false, true}, /* ctl3 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 18, false, true}, /* ctl4 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 19, false, true}, /* ctl5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 1, true, true},/* ctrlq*/ + {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0, false, false},/* ctx0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 14, false, true},/*ctl0*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 15, false, true},/*ctl1*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 16, false, true},/*ctl2*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 17, false, true},/*ctl3*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 18, false, true},/*ctl4*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 19, false, true},/*ctl5*/ #if IS_ENABLED(CONFIG_DEBUG_FS) - {HW_FENCE_IPC_CLIENT_ID_APPS, 21, true, true}, /* val0 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 22, true, true}, /* val1 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 23, true, true}, /* val2 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 24, true, true}, /* val3 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 25, true, true}, /* val4 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 26, true, true}, /* val5 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 27, true, true}, /* val6 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 21, true, true},/* val0*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 22, true, true},/* val1*/ + 
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 23, true, true},/* val2*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 24, true, true},/* val3*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25, true, true},/* val4*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26, true, true},/* val5*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27, true, true},/* val6*/ #endif /* CONFIG_DEBUG_FS */ }; @@ -62,31 +65,67 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_no_dpu[HW_FENCE_CLIENT_M * Note that the index of this struct must match the enum hw_fence_client_id */ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_CLIENT_MAX] = { - {HW_FENCE_IPC_CLIENT_ID_APPS, 1, true, true}, /* ctrl queue loopback */ - {HW_FENCE_IPC_CLIENT_ID_GPU, 0, false, false}, /* ctx0 */ - {HW_FENCE_IPC_CLIENT_ID_DPU, 0, false, true}, /* ctl0 */ - {HW_FENCE_IPC_CLIENT_ID_DPU, 1, false, true}, /* ctl1 */ - {HW_FENCE_IPC_CLIENT_ID_DPU, 2, false, true}, /* ctl2 */ - {HW_FENCE_IPC_CLIENT_ID_DPU, 3, false, true}, /* ctl3 */ - {HW_FENCE_IPC_CLIENT_ID_DPU, 4, false, true}, /* ctl4 */ - {HW_FENCE_IPC_CLIENT_ID_DPU, 5, false, true}, /* ctl5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 1, true, true},/*ctrl q*/ + {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0, false, false},/*ctx0 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 0, false, true},/* ctl0 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 1, false, true},/* ctl1 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 2, false, true},/* ctl2 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 3, false, true},/* ctl3 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 4, false, true},/* ctl4 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 5, false, true},/* 
ctl5 */ #if IS_ENABLED(CONFIG_DEBUG_FS) - {HW_FENCE_IPC_CLIENT_ID_APPS, 21, true, true}, /* val0 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 22, true, true}, /* val1 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 23, true, true}, /* val2 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 24, true, true}, /* val3 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 25, true, true}, /* val4 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 26, true, true}, /* val5 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 27, true, true}, /* val6 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 21, true, true},/* val0*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 22, true, true},/* val1*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 23, true, true},/* val2*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 24, true, true},/* val3*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25, true, true},/* val4*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26, true, true},/* val5*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27, true, true},/* val6*/ #endif /* CONFIG_DEBUG_FS */ }; -int hw_fence_ipcc_get_client_id(struct hw_fence_driver_data *drv_data, u32 client_id) +/** + * struct hw_fence_clients_ipc_map_v2 - Table makes the 'client to signal' mapping, which is + * used by the hw fence driver to trigger ipc signal when hw fence is already + * signaled. + * This version is for targets that support dpu client id and IPC v2. 
+ * + * Note that the index of this struct must match the enum hw_fence_client_id + */ +struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_v2[HW_FENCE_CLIENT_MAX] = { + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 1, true, true},/*ctrlq */ + {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_PID, 0, false, false},/* ctx0*/ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 0, false, true},/* ctl0 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 1, false, true},/* ctl1 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 2, false, true},/* ctl2 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 3, false, true},/* ctl3 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 4, false, true},/* ctl4 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 5, false, true},/* ctl5 */ +#if IS_ENABLED(CONFIG_DEBUG_FS) + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 21, true, true},/* val0*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 22, true, true},/* val1*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 23, true, true},/* val2*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 24, true, true},/* val3*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 25, true, true},/* val4*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 26, true, true},/* val5*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 27, true, true},/* val6*/ +#endif /* CONFIG_DEBUG_FS */ +}; + +int hw_fence_ipcc_get_client_virt_id(struct hw_fence_driver_data *drv_data, u32 client_id) { if (!drv_data || client_id >= HW_FENCE_CLIENT_MAX) return -EINVAL; - return drv_data->ipc_clients_table[client_id].ipc_client_id; + return drv_data->ipc_clients_table[client_id].ipc_client_id_virt; +} + +int 
hw_fence_ipcc_get_client_phys_id(struct hw_fence_driver_data *drv_data, u32 client_id) +{ + if (!drv_data || client_id >= HW_FENCE_CLIENT_MAX) + return -EINVAL; + + return drv_data->ipc_clients_table[client_id].ipc_client_id_phys; } int hw_fence_ipcc_get_signal_id(struct hw_fence_driver_data *drv_data, u32 client_id) @@ -114,36 +153,53 @@ bool hw_fence_ipcc_needs_ipc_irq(struct hw_fence_driver_data *drv_data, int clie } /** - * _get_ipc_client_name() - Returns ipc client name, used for debugging. + * _get_ipc_phys_client_name() - Returns ipc client name from its physical id, used for debugging. */ -static inline char *_get_ipc_client_name(u32 client_id) +static inline char *_get_ipc_phys_client_name(u32 client_id) { switch (client_id) { - case HW_FENCE_IPC_CLIENT_ID_APPS: - return "APPS"; - case HW_FENCE_IPC_CLIENT_ID_GPU: - return "GPU"; - case HW_FENCE_IPC_CLIENT_ID_DPU: - return "DPU"; + case HW_FENCE_IPC_CLIENT_ID_APPS_PID: + return "APPS_PID"; + case HW_FENCE_IPC_CLIENT_ID_GPU_PID: + return "GPU_PID"; + case HW_FENCE_IPC_CLIENT_ID_DPU_PID: + return "DPU_PID"; } - return "UNKNOWN"; + return "UNKNOWN_PID"; +} + +/** + * _get_ipc_virt_client_name() - Returns ipc client name from its virtual id, used for debugging. 
+ */ +static inline char *_get_ipc_virt_client_name(u32 client_id) +{ + switch (client_id) { + case HW_FENCE_IPC_CLIENT_ID_APPS_VID: + return "APPS_VID"; + case HW_FENCE_IPC_CLIENT_ID_GPU_VID: + return "GPU_VID"; + case HW_FENCE_IPC_CLIENT_ID_DPU_VID: + return "DPU_VID"; + } + + return "UNKNOWN_VID"; } void hw_fence_ipcc_trigger_signal(struct hw_fence_driver_data *drv_data, - u32 tx_client_id, u32 rx_client_id, u32 signal_id) + u32 tx_client_pid, u32 rx_client_vid, u32 signal_id) { void __iomem *ptr; u32 val; /* Send signal */ ptr = IPC_PROTOCOLp_CLIENTc_SEND(drv_data->ipcc_io_mem, drv_data->protocol_id, - tx_client_id); - val = (rx_client_id << 16) | signal_id; + tx_client_pid); + val = (rx_client_vid << 16) | signal_id; HWFNC_DBG_IRQ("Sending ipcc from %s (%d) to %s (%d) signal_id:%d [wr:0x%x to off:0x%pK]\n", - _get_ipc_client_name(tx_client_id), tx_client_id, - _get_ipc_client_name(rx_client_id), rx_client_id, + _get_ipc_phys_client_name(tx_client_pid), tx_client_pid, + _get_ipc_virt_client_name(rx_client_vid), rx_client_vid, signal_id, val, ptr); HWFNC_DBG_H("Write:0x%x to RegOffset:0x%pK\n", val, ptr); writel_relaxed(val, ptr); @@ -162,22 +218,32 @@ static int _hw_fence_ipcc_hwrev_init(struct hw_fence_driver_data *drv_data, u32 { switch (hwrev) { case HW_FENCE_IPCC_HW_REV_100: - drv_data->ipcc_client_id = HW_FENCE_IPC_CLIENT_ID_APPS; + drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; + drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_LAHAINA; drv_data->ipc_clients_table = hw_fence_clients_ipc_map_no_dpu; HWFNC_DBG_INIT("ipcc protocol_id: Lahaina\n"); break; case HW_FENCE_IPCC_HW_REV_110: - drv_data->ipcc_client_id = HW_FENCE_IPC_CLIENT_ID_APPS; + drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; + drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_WAIPIO; drv_data->ipc_clients_table = 
hw_fence_clients_ipc_map_no_dpu; HWFNC_DBG_INIT("ipcc protocol_id: Waipio\n"); break; case HW_FENCE_IPCC_HW_REV_170: - drv_data->ipcc_client_id = HW_FENCE_IPC_CLIENT_ID_APPS; - drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_KAILUA; + drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; + drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; + drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_KALAMA; drv_data->ipc_clients_table = hw_fence_clients_ipc_map; - HWFNC_DBG_INIT("ipcc protocol_id: Kailua\n"); + HWFNC_DBG_INIT("ipcc protocol_id: Kalama\n"); + break; + case HW_FENCE_IPCC_HW_REV_203: + drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; + drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_PID; + drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_PINEAPPLE; /* Fence */ + drv_data->ipc_clients_table = hw_fence_clients_ipc_map_v2; + HWFNC_DBG_INIT("ipcc protocol_id: Pineapple\n"); break; default: return -1; @@ -190,13 +256,25 @@ int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data) { void __iomem *ptr; u32 val; + int ret; HWFNC_DBG_H("enable ipc +\n"); - /* Read IPC Version from Client=0x8 (apps) for protocol=2 (compute_l1) */ - val = readl_relaxed(IPC_PROTOCOLp_CLIENTc_VERSION(drv_data->ipcc_io_mem, - HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_LAHAINA, HW_FENCE_IPC_CLIENT_ID_APPS)); - HWFNC_DBG_INIT("ipcc version:0x%x\n", val); + /** + * Attempt to read the ipc version from dt, if not available, then attempt + * to read from the registers. 
+ */ + ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-ipc-ver", &val); + if (ret || !val) { + /* if no device tree prop, attempt to get the version from the registers*/ + HWFNC_DBG_H("missing hw fences ipc-ver entry or invalid ret:%d val:%d\n", ret, val); + + /* Read IPC Version from Client=0x8 (apps) for protocol=2 (compute_l1) */ + val = readl_relaxed(IPC_PROTOCOLp_CLIENTc_VERSION(drv_data->ipcc_io_mem, + HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_LAHAINA, + HW_FENCE_IPC_CLIENT_ID_APPS_VID)); + HWFNC_DBG_INIT("ipcc version:0x%x\n", val); + } if (_hw_fence_ipcc_hwrev_init(drv_data, val)) { HWFNC_ERR("ipcc protocol id not supported\n"); @@ -206,14 +284,14 @@ int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data) /* Enable compute l1 (protocol_id = 2) */ val = 0x00000000; ptr = IPC_PROTOCOLp_CLIENTc_CONFIG(drv_data->ipcc_io_mem, drv_data->protocol_id, - HW_FENCE_IPC_CLIENT_ID_APPS); + drv_data->ipcc_client_pid); HWFNC_DBG_H("Write:0x%x to RegOffset:0x%pK\n", val, ptr); writel_relaxed(val, ptr); /* Enable Client-Signal pairs from APPS(NS) (0x8) to APPS(NS) (0x8) */ val = 0x000080000; ptr = IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_ENABLE(drv_data->ipcc_io_mem, drv_data->protocol_id, - HW_FENCE_IPC_CLIENT_ID_APPS); + drv_data->ipcc_client_pid); HWFNC_DBG_H("Write:0x%x to RegOffset:0x%pK\n", val, ptr); writel_relaxed(val, ptr); @@ -226,6 +304,7 @@ int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data) int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data) { struct hw_fence_client_ipc_map *hw_fence_client; + bool protocol_enabled = false; void __iomem *ptr; u32 val; int i; @@ -239,31 +318,41 @@ int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data) HWFNC_DBG_H("ipcc_io_mem:0x%lx\n", (u64)drv_data->ipcc_io_mem); - /* - * Enable compute l1 (protocol_id = 2) for dpu (25) - * Sets bit(1) to clear when RECV_ID is read - */ - val = 0x00000001; - ptr = 
IPC_PROTOCOLp_CLIENTc_CONFIG(drv_data->ipcc_io_mem, drv_data->protocol_id, - HW_FENCE_IPC_CLIENT_ID_DPU); - HWFNC_DBG_H("Write:0x%x to RegOffset:0x%lx\n", val, (u64)ptr); - writel_relaxed(val, ptr); - HWFNC_DBG_H("Initialize dpu signals\n"); /* Enable Client-Signal pairs from DPU (25) to APPS(NS) (8) */ for (i = 0; i < HW_FENCE_CLIENT_MAX; i++) { hw_fence_client = &drv_data->ipc_clients_table[i]; /* skip any client that is not a dpu client */ - if (hw_fence_client->ipc_client_id != HW_FENCE_IPC_CLIENT_ID_DPU) + if (hw_fence_client->ipc_client_id_virt != HW_FENCE_IPC_CLIENT_ID_DPU_VID) continue; + if (!protocol_enabled) { + /* + * First DPU client will enable the protocol for dpu, e.g. compute l1 + * (protocol_id = 2) or fencing protocol, depending on the target, for the + * dpu client (vid = 25, pid = 9). + * Sets bit(1) to clear when RECV_ID is read + */ + val = 0x00000001; + ptr = IPC_PROTOCOLp_CLIENTc_CONFIG(drv_data->ipcc_io_mem, + drv_data->protocol_id, hw_fence_client->ipc_client_id_phys); + HWFNC_DBG_H("Write:0x%x to RegOffset:0x%lx\n", val, (u64)ptr); + writel_relaxed(val, ptr); + + protocol_enabled = true; + } + /* Enable signals for dpu client */ - HWFNC_DBG_H("dpu:%d client:%d signal:%d\n", hw_fence_client->ipc_client_id, i, + HWFNC_DBG_H("dpu client:%d vid:%d pid:%d signal:%d\n", i, + hw_fence_client->ipc_client_id_virt, hw_fence_client->ipc_client_id_phys, hw_fence_client->ipc_signal_id); - val = 0x000080000 | (hw_fence_client->ipc_signal_id & 0xFFFF); + + /* Enable input apps-signal for dpu */ + val = (HW_FENCE_IPC_CLIENT_ID_APPS_VID << 16) | + (hw_fence_client->ipc_signal_id & 0xFFFF); ptr = IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_ENABLE(drv_data->ipcc_io_mem, - drv_data->protocol_id, HW_FENCE_IPC_CLIENT_ID_DPU); + drv_data->protocol_id, hw_fence_client->ipc_client_id_phys); HWFNC_DBG_H("Write:0x%x to RegOffset:0x%lx\n", val, (u64)ptr); writel_relaxed(val, ptr); } diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 
f820a824e5..c37374ade4 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1047,8 +1047,8 @@ static void _fence_ctl_signal(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash, u64 flags, u32 error) { - u32 tx_client_id = drv_data->ipcc_client_id; - u32 rx_client_id = hw_fence_client->ipc_client_id; + u32 tx_client_id = drv_data->ipcc_client_pid; /* phys id for tx client */ + u32 rx_client_id = hw_fence_client->ipc_client_vid; /* virt id for rx client */ HWFNC_DBG_H("We must signal the client now! hfence hash:%llu\n", hash); diff --git a/hw_fence/src/hw_fence_ioctl.c b/hw_fence/src/hw_fence_ioctl.c index 3ccd2dd7de..431bf658ed 100644 --- a/hw_fence/src/hw_fence_ioctl.c +++ b/hw_fence/src/hw_fence_ioctl.c @@ -546,6 +546,7 @@ static long hw_sync_ioctl_reg_for_wait(struct hw_sync_obj *obj, unsigned long ar static long hw_sync_ioctl_fence_signal(struct hw_sync_obj *obj, unsigned long arg) { + struct msm_hw_fence_client *hw_fence_client; struct hw_fence_sync_signal_data data; int ret, tx_client, rx_client, signal_id; @@ -556,6 +557,12 @@ static long hw_sync_ioctl_fence_signal(struct hw_sync_obj *obj, unsigned long ar return -EINVAL; } + hw_fence_client = (struct msm_hw_fence_client *)obj->client_handle; + if (!hw_fence_client) { + HWFNC_ERR("invalid client handle\n"); + return -EINVAL; + } + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) return -EFAULT; @@ -569,8 +576,8 @@ static long hw_sync_ioctl_fence_signal(struct hw_sync_obj *obj, unsigned long ar if (signal_id < 0) return -EINVAL; - tx_client = HW_FENCE_IPC_CLIENT_ID_APPS; - rx_client = HW_FENCE_IPC_CLIENT_ID_APPS; + tx_client = hw_fence_client->ipc_client_vid; + rx_client = hw_fence_client->ipc_client_pid; ret = msm_hw_fence_trigger_signal(obj->client_handle, tx_client, rx_client, signal_id); if (ret) { HWFNC_ERR("hw fence trigger signal has failed\n"); diff --git a/hw_fence/src/msm_hw_fence.c 
b/hw_fence/src/msm_hw_fence.c index 8eb520527f..8b8dfb0a59 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -54,10 +54,14 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id, mutex_unlock(&hw_fence_drv_data->clients_register_lock); hw_fence_client->client_id = client_id; - hw_fence_client->ipc_client_id = hw_fence_ipcc_get_client_id(hw_fence_drv_data, client_id); + hw_fence_client->ipc_client_vid = + hw_fence_ipcc_get_client_virt_id(hw_fence_drv_data, client_id); + hw_fence_client->ipc_client_pid = + hw_fence_ipcc_get_client_phys_id(hw_fence_drv_data, client_id); - if (hw_fence_client->ipc_client_id <= 0) { - HWFNC_ERR("Failed to find client:%d ipc id\n", client_id); + if (hw_fence_client->ipc_client_vid <= 0 || hw_fence_client->ipc_client_pid <= 0) { + HWFNC_ERR("Failed to find client:%d ipc vid:%d pid:%d\n", client_id, + hw_fence_client->ipc_client_vid, hw_fence_client->ipc_client_pid); ret = -EINVAL; goto error; } @@ -91,9 +95,9 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id, if (ret) goto error; - HWFNC_DBG_INIT("-- Initialized ptr:0x%p client_id:%d ipc_signal_id:%d ipc_client_id:%d\n", + HWFNC_DBG_INIT("-- Initialized ptr:0x%p client_id:%d ipc_signal_id:%d ipc vid:%d pid:%d\n", hw_fence_client, hw_fence_client->client_id, hw_fence_client->ipc_signal_id, - hw_fence_client->ipc_client_id); + hw_fence_client->ipc_client_vid, hw_fence_client->ipc_client_pid); #if IS_ENABLED(CONFIG_DEBUG_FS) init_waitqueue_head(&hw_fence_client->wait_queue); @@ -338,8 +342,9 @@ int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 erro } EXPORT_SYMBOL(msm_hw_fence_update_txq); +/* tx client has to be the physical, rx client virtual id*/ int msm_hw_fence_trigger_signal(void *client_handle, - u32 tx_client_id, u32 rx_client_id, + u32 tx_client_pid, u32 rx_client_vid, u32 signal_id) { struct msm_hw_fence_client *hw_fence_client; @@ -355,8 +360,8 @@ int msm_hw_fence_trigger_signal(void *client_handle, 
hw_fence_client = (struct msm_hw_fence_client *)client_handle; HWFNC_DBG_H("sending ipc for client:%d\n", hw_fence_client->client_id); - hw_fence_ipcc_trigger_signal(hw_fence_drv_data, tx_client_id, - rx_client_id, signal_id); + hw_fence_ipcc_trigger_signal(hw_fence_drv_data, tx_client_pid, + rx_client_vid, signal_id); return 0; } From 05689a41c3f3958ce66b0f2be1d43bddf1f71874 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Thu, 11 Aug 2022 22:10:45 -0700 Subject: [PATCH 036/166] mm-drivers: hw_fence: add inter-vm try lock Add support for inter-vm try-lock between hlos and vm. Change-Id: Iab9087acf82a4a746e9d43a736724ce2e7196237 Signed-off-by: Ingrid Gallardo --- hw_fence/include/hw_fence_drv_debug.h | 4 +++ hw_fence/include/hw_fence_drv_utils.h | 3 +- hw_fence/src/hw_fence_drv_priv.c | 41 +++++++++++----------- hw_fence/src/hw_fence_drv_utils.c | 50 ++++++++++++++++++++------- 4 files changed, 64 insertions(+), 34 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_debug.h b/hw_fence/include/hw_fence_drv_debug.h index 4f22b94664..bfb654e603 100644 --- a/hw_fence/include/hw_fence_drv_debug.h +++ b/hw_fence/include/hw_fence_drv_debug.h @@ -18,6 +18,7 @@ enum hw_fence_drv_prio { HW_FENCE_QUEUE = 0x000010, /* Queue logs */ HW_FENCE_LUT = 0x000020, /* Look-up and algorithm logs */ HW_FENCE_IRQ = 0x000040, /* Interrupt-related messages */ + HW_FENCE_LOCK = 0x000080, /* Lock-related messages */ HW_FENCE_PRINTK = 0x010000, }; @@ -56,6 +57,9 @@ extern u32 msm_hw_fence_debug_level; #define HWFNC_DBG_IRQ(fmt, ...) \ dprintk(HW_FENCE_IRQ, "[hwfence:%s:%d][dbgirq]"fmt, __func__, __LINE__, ##__VA_ARGS__) +#define HWFNC_DBG_LOCK(fmt, ...) \ + dprintk(HW_FENCE_LOCK, "[hwfence:%s:%d][dbglock]"fmt, __func__, __LINE__, ##__VA_ARGS__) + #define HWFNC_WARN(fmt, ...) 
\ pr_warn("[hwfence:%s:%d][warn][%pS] "fmt, __func__, __LINE__, \ __builtin_return_address(0), ##__VA_ARGS__) diff --git a/hw_fence/include/hw_fence_drv_utils.h b/hw_fence/include/hw_fence_drv_utils.h index 092bb625cf..2ef6df0fe9 100644 --- a/hw_fence/include/hw_fence_drv_utils.h +++ b/hw_fence/include/hw_fence_drv_utils.h @@ -22,10 +22,11 @@ enum hw_fence_mem_reserve { /** * global_atomic_store() - Inter-processor lock + * @drv_data: hw fence driver data * @lock: memory to lock * @val: if true, api locks the memory, if false it unlocks the memory */ -void global_atomic_store(uint64_t *lock, bool val); +void global_atomic_store(struct hw_fence_driver_data *drv_data, uint64_t *lock, bool val); /** * hw_fence_utils_init_virq() - Initialilze doorbell (i.e. vIRQ) for SVM to HLOS signaling diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index c37374ade4..74f7171817 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -13,7 +13,7 @@ #include "hw_fence_drv_debug.h" /* Global atomic lock */ -#define GLOBAL_ATOMIC_STORE(lock, val) global_atomic_store(lock, val) +#define GLOBAL_ATOMIC_STORE(drv_data, lock, val) global_atomic_store(drv_data, lock, val) inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data) { @@ -277,7 +277,7 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, HWFNC_DBG_Q("Locking client id:%d: idx:%d\n", hw_fence_client->client_id, lock_idx); /* lock the client rx queue to update */ - GLOBAL_ATOMIC_STORE(&drv_data->client_lock_tbl[lock_idx], 1); /* lock */ + GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 1); /* lock */ } /* Make sure data is ready before read */ @@ -348,7 +348,7 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, exit: if (lock_client) - GLOBAL_ATOMIC_STORE(&drv_data->client_lock_tbl[lock_idx], 0); /* unlock */ + GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 0); /* unlock */ return ret; } 
@@ -882,7 +882,7 @@ struct msm_hw_fence *_hw_fence_lookup_and_process(struct hw_fence_driver_data *d break; } - GLOBAL_ATOMIC_STORE(&hw_fence->lock, 1); + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* compare to either find a free fence or find an allocated fence */ if (compare_fnc(hw_fence, context, seqno)) { @@ -907,7 +907,7 @@ struct msm_hw_fence *_hw_fence_lookup_and_process(struct hw_fence_driver_data *d /* ctx & seqno must be unique creating a hw-fence */ HWFNC_ERR("cannot create hw fence with same ctx:%llu seqno:%llu\n", context, seqno); - GLOBAL_ATOMIC_STORE(&hw_fence->lock, 0); + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); break; } /* compare can fail if we have a collision, we will linearly resolve it */ @@ -915,7 +915,7 @@ struct msm_hw_fence *_hw_fence_lookup_and_process(struct hw_fence_driver_data *d context, seqno); } - GLOBAL_ATOMIC_STORE(&hw_fence->lock, 0); + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* Increment step for the next loop */ step++; @@ -1090,7 +1090,7 @@ static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data } /* lock the child while we clean it up from the parent join-fence */ - GLOBAL_ATOMIC_STORE(&hw_fence_child->lock, 1); /* lock */ + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 1); /* lock */ for (j = hw_fence_child->parents_cnt; j > 0; j--) { if (j > MSM_HW_FENCE_MAX_JOIN_PARENTS) { @@ -1110,7 +1110,7 @@ static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data wmb(); } } - GLOBAL_ATOMIC_STORE(&hw_fence_child->lock, 0); /* unlock */ + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0); /* unlock */ } /* destroy join fence */ @@ -1141,9 +1141,9 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, } /* update this as waiting client of the join-fence */ - GLOBAL_ATOMIC_STORE(&join_fence->lock, 1); /* lock */ + GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 1); /* lock */ join_fence->wait_client_mask |= 
BIT(hw_fence_client->client_id); - GLOBAL_ATOMIC_STORE(&join_fence->lock, 0); /* unlock */ + GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 0); /* unlock */ /* Iterate through fences of the array */ for (i = 0; i < array->num_fences; i++) { @@ -1173,18 +1173,18 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, goto error_array; } - GLOBAL_ATOMIC_STORE(&hw_fence_child->lock, 1); /* lock */ + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 1); /* lock */ if (hw_fence_child->flags & MSM_HW_FENCE_FLAG_SIGNAL) { /* child fence is already signaled */ - GLOBAL_ATOMIC_STORE(&join_fence->lock, 1); /* lock */ + GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 1); /* lock */ if (--join_fence->pending_child_cnt == 0) signal_join_fence = true; /* update memory for the table update */ wmb(); - GLOBAL_ATOMIC_STORE(&join_fence->lock, 0); /* unlock */ + GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 0); /* unlock */ } else { /* child fence is not signaled */ @@ -1201,7 +1201,8 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, /* update memory for the table update */ wmb(); - GLOBAL_ATOMIC_STORE(&hw_fence_child->lock, 0); /* unlock */ + /* unlock */ + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0); ret = -EINVAL; goto error_array; } @@ -1212,7 +1213,7 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, /* update memory for the table update */ wmb(); } - GLOBAL_ATOMIC_STORE(&hw_fence_child->lock, 0); /* unlock */ + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0); /* unlock */ } /* all fences were signaled, signal client now */ @@ -1254,7 +1255,7 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, return -EINVAL; } - GLOBAL_ATOMIC_STORE(&hw_fence->lock, 1); /* lock */ + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */ /* register client in the hw fence */ hw_fence->wait_client_mask |= BIT(hw_fence_client->client_id); @@ -1264,6 +1265,8 @@ int 
hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, /* update memory for the table update */ wmb(); + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */ + /* if hw fence already signaled, signal the client */ if (hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL) { if (fence != NULL) @@ -1271,8 +1274,6 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, _fence_ctl_signal(drv_data, hw_fence_client, hw_fence, hash, 0, 0); } - GLOBAL_ATOMIC_STORE(&hw_fence->lock, 0); /* unlock */ - return 0; } @@ -1325,7 +1326,7 @@ int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, int ret = 0; int error = (reset_flags & MSM_HW_FENCE_RESET_WITHOUT_ERROR) ? 0 : MSM_HW_FENCE_ERROR_RESET; - GLOBAL_ATOMIC_STORE(&hw_fence->lock, 1); /* lock */ + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */ if (hw_fence->wait_client_mask & BIT(hw_fence_client->client_id)) { HWFNC_DBG_H("clearing client:%d wait bit for fence: ctx:%d seqno:%d\n", hw_fence_client->client_id, hw_fence->ctx_id, @@ -1335,7 +1336,7 @@ int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, /* update memory for the table update */ wmb(); } - GLOBAL_ATOMIC_STORE(&hw_fence->lock, 0); /* unlock */ + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */ if (hw_fence->fence_allocator == hw_fence_client->client_id) { diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 5d791de1ad..36d8494e1d 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -17,40 +17,64 @@ static void _lock(uint64_t *wait) { - /* WFE Wait */ #if defined(__aarch64__) - __asm__("SEVL\n\t" + __asm__( + // Sequence to wait for lock to be free (i.e. 
zero) "PRFM PSTL1KEEP, [%x[i_lock]]\n\t" "1:\n\t" - "WFE\n\t" "LDAXR W5, [%x[i_lock]]\n\t" "CBNZ W5, 1b\n\t" - "STXR W5, W0, [%x[i_lock]]\n\t" - "CBNZ W5, 1b\n" + // Sequence to set PVM BIT0 + "LDR W7, =0x1\n\t" // Load BIT0 (0x1) into W7 + "STXR W5, W7, [%x[i_lock]]\n\t" // Atomic Store exclusive BIT0 (lock = 0x1) + "CBNZ W5, 1b\n\t" // If cannot set it, goto 1 : : [i_lock] "r" (wait) : "memory"); #endif } -static void _unlock(uint64_t *lock) +static void _unlock(struct hw_fence_driver_data *drv_data, uint64_t *lock) { - /* Signal Client */ + uint64_t lock_val; + #if defined(__aarch64__) - __asm__("STLR WZR, [%x[i_out]]\n\t" - "SEV\n" + __asm__( + // Sequence to clear PVM BIT0 + "2:\n\t" + "LDAXR W5, [%x[i_out]]\n\t" // Atomic Fetch Lock + "AND W6, W5, #0xFFFFFFFFFFFFFFFE\n\t" // AND to clear BIT0 (lock &= ~0x1)) + "STXR W5, W6, [%x[i_out]]\n\t" // Store exclusive result + "CBNZ W5, 2b\n\t" // If cannot store exclusive, goto 2 : : [i_out] "r" (lock) : "memory"); #endif + mb(); /* Make sure the memory is updated */ + + lock_val = *lock; /* Read the lock value */ + HWFNC_DBG_LOCK("unlock: lock_val after:0x%llx\n", lock_val); + if (lock_val & 0x2) { /* check if SVM BIT1 is set*/ + /* + * SVM is in WFI state, since SVM acquire bit is set + * Trigger IRQ to Wake-Up SVM Client + */ + HWFNC_DBG_LOCK("triggering ipc to unblock SVM lock_val:%d\n", lock_val); + hw_fence_ipcc_trigger_signal(drv_data, + drv_data->ipcc_client_pid, + drv_data->ipcc_client_vid, 30); /* Trigger APPS Signal 30 */ + } } -void global_atomic_store(uint64_t *lock, bool val) +void global_atomic_store(struct hw_fence_driver_data *drv_data, uint64_t *lock, bool val) { - if (val) + if (val) { + preempt_disable(); _lock(lock); - else - _unlock(lock); + } else { + _unlock(drv_data, lock); + preempt_enable(); + } } /* From f5cc2eb42f5542a0e6d69f2564b59573e9294041 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Fri, 21 Oct 2022 18:46:26 -0700 Subject: [PATCH 037/166] mm-drivers: hw_fence: add check to 
avoid empty join hw fence Current driver creates a join hw-fence from a fence-array, adds its waiting client to it, and then it decides if signal the hw-fence depending in the current state of all the child hw-fences from the fence array. However, if by any reason the fence-array gets all its children cleared within it (which can happen for spec-fences failures), hw-fence driver logic won't signal the new created join-fence. This can lead to the creation of an empty or incomplete join-fence that the waiting-client will be waiting-for, but won't be signaled. Add a check to make sure that if above scenario is ever presented, the register for wait API catches this issue and fails to register for wait in this invalid fence. Change-Id: If3c69405d2a3adfefd12f447257c2560b839d238 Signed-off-by: Ingrid Gallardo --- hw_fence/src/hw_fence_drv_priv.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 74f7171817..4fcfdc4131 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1072,9 +1072,16 @@ static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data int idx, j; u64 hash = 0; + if (!array->fences) + goto destroy_fence; + /* cleanup the child-fences from the parent join-fence */ for (idx = iteration; idx >= 0; idx--) { child_fence = array->fences[idx]; + if (!child_fence) { + HWFNC_ERR("invalid child fence idx:%d\n", idx); + continue; + } hw_fence_child = msm_hw_fence_find(drv_data, hw_fence_client, child_fence->context, child_fence->seqno, &hash); @@ -1113,6 +1120,7 @@ static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0); /* unlock */ } +destroy_fence: /* destroy join fence */ _hw_fence_process_join_fence(drv_data, hw_fence_client, array, &hash_join_fence, false); @@ -1230,6 +1238,15 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data 
*drv_data, */ _hw_fence_process_join_fence(drv_data, hw_fence_client, array, &hash_join_fence, false); + } else if (!array->num_fences) { + /* + * if we didn't signal the join-fence and the number of fences is not set in + * the fence-array, then fail here, otherwise driver would create a join-fence + * with no-childs that won't be signaled at all or an incomplete join-fence + */ + HWFNC_ERR("invalid fence-array ctx:%llu seqno:%llu without fences\n", + array->base.context, array->base.seqno); + goto error_array; } return ret; From b87b258b9e9d53cc84af592c5aaf97a819dc9e12 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Wed, 26 Oct 2022 12:08:28 -0700 Subject: [PATCH 038/166] mm-drivers: hw_fence: add debug refcount to trylock Add debugfs to query the amount of times that inter-vm trylock needs to wakeup the svm. Change-Id: Ic1f88319f502e652902be0d45792768cf5c5154e Signed-off-by: Ingrid Gallardo --- hw_fence/include/hw_fence_drv_priv.h | 3 +++ hw_fence/src/hw_fence_drv_debug.c | 2 ++ hw_fence/src/hw_fence_drv_utils.c | 6 +++++- 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 14a302871d..b9165fe666 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -191,6 +191,7 @@ struct msm_hw_fence_mem_data { * @create_hw_fences: boolean to continuosly create hw-fences within debugfs * @clients_list: list of debug clients registered * @clients_list_lock: lock to synchronize access to the clients list + * @lock_wake_cnt: number of times that driver triggers wake-up ipcc to unlock inter-vm try-lock */ struct msm_hw_fence_dbg_data { struct dentry *root; @@ -204,6 +205,8 @@ struct msm_hw_fence_dbg_data { struct list_head clients_list; struct mutex clients_list_lock; + + u64 lock_wake_cnt; }; /** diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index a1db824278..314bf27a18 100644 --- 
a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -1003,6 +1003,8 @@ int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data) debugfs_create_file("hw_fence_dump_queues", 0600, debugfs_root, drv_data, &hw_fence_dump_queues_fops); debugfs_create_file("hw_sync", 0600, debugfs_root, NULL, &hw_sync_debugfs_fops); + debugfs_create_u64("hw_fence_lock_wake_cnt", 0600, debugfs_root, + &drv_data->debugfs_data.lock_wake_cnt); return 0; } diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 36d8494e1d..c8eab917dd 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -59,7 +59,11 @@ static void _unlock(struct hw_fence_driver_data *drv_data, uint64_t *lock) * SVM is in WFI state, since SVM acquire bit is set * Trigger IRQ to Wake-Up SVM Client */ - HWFNC_DBG_LOCK("triggering ipc to unblock SVM lock_val:%d\n", lock_val); +#if IS_ENABLED(CONFIG_DEBUG_FS) + drv_data->debugfs_data.lock_wake_cnt++; + HWFNC_DBG_LOCK("triggering ipc to unblock SVM lock_val:%d cnt:%llu\n", lock_val, + drv_data->debugfs_data.lock_wake_cnt); +#endif hw_fence_ipcc_trigger_signal(drv_data, drv_data->ipcc_client_pid, drv_data->ipcc_client_vid, 30); /* Trigger APPS Signal 30 */ From 66c1c4f019d1a8bcafa2825c4a1ed24d572ffe69 Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 17 Aug 2022 15:05:02 -0700 Subject: [PATCH 039/166] mm-drivers: hw_fence: update new APIs for synx compat support Add new APIs to receive params client-id and handles of hw fences to manage synx compat support. 
Change-Id: I5dae0845f8eb2c6c05cc2605d8fc93935c780901 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 8 ++-- hw_fence/src/hw_fence_drv_debug.c | 5 +- hw_fence/src/hw_fence_drv_priv.c | 57 ++++++++++++++++------ hw_fence/src/hw_fence_ioctl.c | 2 +- hw_fence/src/msm_hw_fence.c | 70 +++++++++++++++++++++++++--- 5 files changed, 117 insertions(+), 25 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 14a302871d..7b83538670 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -422,11 +422,13 @@ int hw_fence_create(struct hw_fence_driver_data *drv_data, int hw_fence_destroy(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno); +int hw_fence_destroy_with_hash(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 hash); int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, - struct dma_fence_array *array); + struct dma_fence_array *array, u64 *hash_join_fence); int hw_fence_process_fence(struct hw_fence_driver_data *drv_data, - struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence); + struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, u64 *hash); int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, u64 seqno, u64 hash, u64 flags, u32 error, int queue_type); @@ -435,7 +437,7 @@ int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence_queue_payload *payload, int queue_type); int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, struct dma_fence *fence, struct msm_hw_fence_client *hw_fence_client, u64 context, - u64 seqno); + u64 seqno, u64 *hash); struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client 
*hw_fence_client, u64 context, u64 seqno, u64 *hash); diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index a1db824278..9959db5bbc 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -345,7 +345,7 @@ static ssize_t hw_fence_dbg_tx_and_signal_clients_wr(struct file *file, /**********************************************/ /* use same context and seqno that src client used to create fence */ ret = hw_fence_register_wait_client(drv_data, NULL, hw_fence_client_dst, context, - seqno); + seqno, &hash); if (ret) { HWFNC_ERR("failed to register for wait\n"); return -EINVAL; @@ -861,7 +861,8 @@ static ssize_t hw_fence_dbg_create_join_fence(struct file *file, /* wait on the fence array */ fence_array_fence = &fence_array->base; - msm_hw_fence_wait_update(client_info_dst->client_handle, &fence_array_fence, 1, 1); + msm_hw_fence_wait_update_v2(client_info_dst->client_handle, &fence_array_fence, NULL, NULL, + 1, 1); signal_id = dbg_out_clients_signal_map_no_dpu[client_id_src].ipc_signal_id; if (signal_id < 0) { diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 74f7171817..dd9fa3c348 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -982,6 +982,37 @@ int hw_fence_destroy(struct hw_fence_driver_data *drv_data, return ret; } +int hw_fence_destroy_with_hash(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 hash) +{ + u32 client_id = hw_fence_client->client_id; + struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl; + struct msm_hw_fence *hw_fence = NULL; + int ret = 0; + + hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, hw_fences_tbl, hash); + if (!hw_fence) { + HWFNC_ERR("bad hw fence hash:%llu client:%lu\n", hash, client_id); + return -EINVAL; + } + + if (hw_fence->fence_allocator != client_id) { + HWFNC_ERR("client:%lu cannot destroy fence hash:%llu fence_allocator:%lu\n", + 
client_id, hash, hw_fence->fence_allocator); + return -EINVAL; + } + + /* remove hw fence from table*/ + if (_hw_fence_cleanup(drv_data, hw_fences_tbl, client_id, hw_fence->ctx_id, + hw_fence->seq_id)) { + HWFNC_ERR("Fail destroying fence client:%lu ctx:%llu seqno:%llu hash:%llu\n", + client_id, hw_fence->ctx_id, hw_fence->seq_id, hash); + ret = -EINVAL; + } + + return ret; +} + static struct msm_hw_fence *_hw_fence_process_join_fence(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, struct dma_fence_array *array, u64 *hash, bool create) @@ -1119,13 +1150,14 @@ static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data } int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, - struct msm_hw_fence_client *hw_fence_client, struct dma_fence_array *array) + struct msm_hw_fence_client *hw_fence_client, struct dma_fence_array *array, + u64 *hash_join_fence) { struct msm_hw_fence *join_fence; struct msm_hw_fence *hw_fence_child; struct dma_fence *child_fence; bool signal_join_fence = false; - u64 hash_join_fence, hash; + u64 hash; int i, ret = 0; /* @@ -1134,7 +1166,7 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, * join_fence->pending_child_count = array->num_fences */ join_fence = _hw_fence_process_join_fence(drv_data, hw_fence_client, array, - &hash_join_fence, true); + hash_join_fence, true); if (!join_fence) { HWFNC_ERR("cannot alloc hw fence for join fence array\n"); return -EINVAL; @@ -1208,7 +1240,7 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, } hw_fence_child->parent_list[hw_fence_child->parents_cnt - 1] = - hash_join_fence; + *hash_join_fence; /* update memory for the table update */ wmb(); @@ -1220,7 +1252,7 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, if (signal_join_fence) { /* signal the join hw fence */ - _fence_ctl_signal(drv_data, hw_fence_client, join_fence, hash_join_fence, 0, 0); + 
_fence_ctl_signal(drv_data, hw_fence_client, join_fence, *hash_join_fence, 0, 0); set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &array->base.flags); /* @@ -1228,7 +1260,7 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, * we can delete it now. This can happen when all the fences that * are part of the join-fence are already signaled. */ - _hw_fence_process_join_fence(drv_data, hw_fence_client, array, &hash_join_fence, + _hw_fence_process_join_fence(drv_data, hw_fence_client, array, hash_join_fence, false); } @@ -1236,20 +1268,19 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, error_array: _cleanup_join_and_child_fences(drv_data, hw_fence_client, i, array, join_fence, - hash_join_fence); + *hash_join_fence); return -EINVAL; } int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, struct dma_fence *fence, struct msm_hw_fence_client *hw_fence_client, u64 context, - u64 seqno) + u64 seqno, u64 *hash) { struct msm_hw_fence *hw_fence; - u64 hash; /* find the hw fence within the table */ - hw_fence = msm_hw_fence_find(drv_data, hw_fence_client, context, seqno, &hash); + hw_fence = msm_hw_fence_find(drv_data, hw_fence_client, context, seqno, hash); if (!hw_fence) { HWFNC_ERR("Cannot find fence!\n"); return -EINVAL; @@ -1271,7 +1302,7 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, if (hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL) { if (fence != NULL) set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &fence->flags); - _fence_ctl_signal(drv_data, hw_fence_client, hw_fence, hash, 0, 0); + _fence_ctl_signal(drv_data, hw_fence_client, hw_fence, *hash, 0, 0); } return 0; @@ -1279,7 +1310,7 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, int hw_fence_process_fence(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, - struct dma_fence *fence) + struct dma_fence *fence, u64 *hash) { int ret = 0; @@ -1294,7 +1325,7 @@ int 
hw_fence_process_fence(struct hw_fence_driver_data *drv_data, } ret = hw_fence_register_wait_client(drv_data, fence, hw_fence_client, fence->context, - fence->seqno); + fence->seqno, hash); if (ret) HWFNC_ERR("Error registering for wait client:%d\n", hw_fence_client->client_id); diff --git a/hw_fence/src/hw_fence_ioctl.c b/hw_fence/src/hw_fence_ioctl.c index 431bf658ed..72566126c6 100644 --- a/hw_fence/src/hw_fence_ioctl.c +++ b/hw_fence/src/hw_fence_ioctl.c @@ -536,7 +536,7 @@ static long hw_sync_ioctl_reg_for_wait(struct hw_sync_obj *obj, unsigned long ar return -EINVAL; } - ret = msm_hw_fence_wait_update(obj->client_handle, &fence, num_fences, 1); + ret = msm_hw_fence_wait_update_v2(obj->client_handle, &fence, NULL, NULL, num_fences, 1); /* Decrement the refcount that hw_sync_get_fence increments */ dma_fence_put(fence); diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 8b8dfb0a59..ee34367a54 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -238,8 +238,41 @@ int msm_hw_fence_destroy(void *client_handle, } EXPORT_SYMBOL(msm_hw_fence_destroy); -int msm_hw_fence_wait_update(void *client_handle, - struct dma_fence **fence_list, u32 num_fences, bool create) +int msm_hw_fence_destroy_with_handle(void *client_handle, u64 handle) +{ + struct msm_hw_fence_client *hw_fence_client; + int ret; + + if (IS_ERR_OR_NULL(client_handle)) { + HWFNC_ERR("Invalid data\n"); + return -EINVAL; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + + if (hw_fence_client->client_id >= HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("Invalid client_id:%d\n", hw_fence_client->client_id); + return -EINVAL; + } + + HWFNC_DBG_H("+\n"); + + /* Destroy the HW Fence, i.e. 
remove entry in the Global Table for the Fence */ + ret = hw_fence_destroy_with_hash(hw_fence_drv_data, hw_fence_client, handle); + if (ret) { + HWFNC_ERR("Error destroying the HW fence handle:%llu client_id:%d\n", handle, + hw_fence_client->client_id); + return ret; + } + + HWFNC_DBG_H("-\n"); + + return 0; +} +EXPORT_SYMBOL(msm_hw_fence_destroy_with_handle); + +int msm_hw_fence_wait_update_v2(void *client_handle, + struct dma_fence **fence_list, u64 *handles, u64 *client_data_list, u32 num_fences, + bool create) { struct msm_hw_fence_client *hw_fence_client; struct dma_fence_array *array; @@ -262,30 +295,43 @@ int msm_hw_fence_wait_update(void *client_handle, /* Process all the list of fences */ for (i = 0; i < num_fences; i++) { struct dma_fence *fence = fence_list[i]; + u64 hash; /* Process a Fence-Array */ array = to_dma_fence_array(fence); if (array) { ret = hw_fence_process_fence_array(hw_fence_drv_data, hw_fence_client, - array); + array, &hash); if (ret) { - HWFNC_ERR("Failed to create FenceArray\n"); + HWFNC_ERR("Failed to process FenceArray\n"); return ret; } } else { /* Process individual Fence */ - ret = hw_fence_process_fence(hw_fence_drv_data, hw_fence_client, fence); + ret = hw_fence_process_fence(hw_fence_drv_data, hw_fence_client, fence, + &hash); if (ret) { - HWFNC_ERR("Failed to create Fence\n"); + HWFNC_ERR("Failed to process Fence\n"); return ret; } } + + if (handles) + handles[i] = hash; } HWFNC_DBG_H("-\n"); return 0; } +EXPORT_SYMBOL(msm_hw_fence_wait_update_v2); + +int msm_hw_fence_wait_update(void *client_handle, + struct dma_fence **fence_list, u32 num_fences, bool create) +{ + return msm_hw_fence_wait_update_v2(client_handle, fence_list, NULL, NULL, num_fences, + create); +} EXPORT_SYMBOL(msm_hw_fence_wait_update); int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags) @@ -316,6 +362,18 @@ int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags) } EXPORT_SYMBOL(msm_hw_fence_reset_client); +int 
msm_hw_fence_reset_client_by_id(enum hw_fence_client_id client_id, u32 reset_flags) +{ + if (client_id >= HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("Invalid client_id:%d\n", client_id); + return -EINVAL; + } + + return msm_hw_fence_reset_client(hw_fence_drv_data->clients[client_id], + reset_flags); +} +EXPORT_SYMBOL(msm_hw_fence_reset_client_by_id); + int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 error) { struct msm_hw_fence_client *hw_fence_client; From b09b4f0720bd5bb1eb90acf802537b6be99a5f13 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 2 Aug 2022 11:19:55 -0700 Subject: [PATCH 040/166] mm-drivers: hw_fence: add support for 64-bit client_data Add support of the option to pass a 64-bit client_data value to the hw fence driver when a client registers as a waiting client for a hardware fence. Then during fence signaling, this client_data is returned to the client via the RxQ. If no client_data is passed to the driver for the hw fence, then a default value of zero is registered as the client_data. Change-Id: I34cf3e50413639d53cbfa8251c98b9ff1d3cbf4a Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 34 ++++++++++-- hw_fence/src/hw_fence_drv_debug.c | 13 ++--- hw_fence/src/hw_fence_drv_priv.c | 78 ++++++++++++++++++++++++---- hw_fence/src/msm_hw_fence.c | 18 +++++-- 4 files changed, 119 insertions(+), 24 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 7b83538670..91bb4adbb5 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -121,6 +121,27 @@ enum hw_fence_loopback_id { #define HW_FENCE_MAX_DPU_LOOPBACK_CLIENTS (HW_FENCE_LOOPBACK_DPU_CTL_5 + 1) +/** + * enum hw_fence_client_data_id - Enum with the clients having client_data, an optional + * parameter passed from the waiting client and returned + * to it upon fence signaling + * @HW_FENCE_CLIENT_DATA_ID_CTX0: GFX Client. + * @HW_FENCE_CLIENT_DATA_ID_IPE: IPE Client. 
+ * @HW_FENCE_CLIENT_DATA_ID_VPU: VPU Client. + * @HW_FENCE_CLIENT_DATA_ID_VAL0: Debug validation client 0. + * @HW_FENCE_CLIENT_DATA_ID_VAL1: Debug validation client 1. + * @HW_FENCE_MAX_CLIENTS_WITH_DATA: Max number of clients with data, also indicates an + * invalid hw_fence_client_data_id + */ +enum hw_fence_client_data_id { + HW_FENCE_CLIENT_DATA_ID_CTX0, + HW_FENCE_CLIENT_DATA_ID_IPE, + HW_FENCE_CLIENT_DATA_ID_VPU, + HW_FENCE_CLIENT_DATA_ID_VAL0, + HW_FENCE_CLIENT_DATA_ID_VAL1, + HW_FENCE_MAX_CLIENTS_WITH_DATA, +}; + /** * struct msm_hw_fence_queue - Structure holding the data of the hw fence queues. * @va_queue: pointer to the virtual address of the queue elements @@ -387,6 +408,8 @@ struct msm_hw_fence_queue_payload { * @fence_trigger_time: debug info with the trigger time timestamp * @fence_wait_time: debug info with the register-for-wait timestamp * @debug_refcount: refcount used for debugging + * @client_data: array of data optionally passed from and returned to clients waiting on the fence + * during fence signaling */ struct msm_hw_fence { u32 valid; @@ -405,6 +428,7 @@ struct msm_hw_fence { u64 fence_trigger_time; u64 fence_wait_time; u64 debug_refcount; + u64 client_data[HW_FENCE_MAX_CLIENTS_WITH_DATA]; }; int hw_fence_init(struct hw_fence_driver_data *drv_data); @@ -426,20 +450,22 @@ int hw_fence_destroy_with_hash(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 hash); int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, - struct dma_fence_array *array, u64 *hash_join_fence); + struct dma_fence_array *array, u64 *hash_join_fence, u64 client_data); int hw_fence_process_fence(struct hw_fence_driver_data *drv_data, - struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, u64 *hash); + struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, u64 *hash, + u64 client_data); int hw_fence_update_queue(struct hw_fence_driver_data 
*drv_data, struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, u64 seqno, u64 hash, - u64 flags, u32 error, int queue_type); + u64 flags, u64 client_data, u32 error, int queue_type); inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data); int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence_queue_payload *payload, int queue_type); int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, struct dma_fence *fence, struct msm_hw_fence_client *hw_fence_client, u64 context, - u64 seqno, u64 *hash); + u64 seqno, u64 *hash, u64 client_data); struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno, u64 *hash); +enum hw_fence_client_data_id hw_fence_get_client_data_id(enum hw_fence_client_id client_id); #endif /* __HW_FENCE_DRV_INTERNAL_H */ diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index 9959db5bbc..f54e8dfc26 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -338,14 +338,14 @@ static ssize_t hw_fence_dbg_tx_and_signal_clients_wr(struct file *file, /* Write to Tx queue */ hw_fence_update_queue(drv_data, hw_fence_client, context, seqno, hash, - 0, 0, HW_FENCE_TX_QUEUE - 1); // no flags and no error + 0, 0, 0, HW_FENCE_TX_QUEUE - 1); /* no flags and no error */ /**********************************************/ /***** DST CLIENT - REGISTER WAIT CLIENT ******/ /**********************************************/ /* use same context and seqno that src client used to create fence */ ret = hw_fence_register_wait_client(drv_data, NULL, hw_fence_client_dst, context, - seqno, &hash); + seqno, &hash, 0); if (ret) { HWFNC_ERR("failed to register for wait\n"); return -EINVAL; @@ -558,7 +558,7 @@ static ssize_t hw_fence_dbg_dump_queues_wr(struct file *file, const char __user struct hw_fence_driver_data *drv_data; struct msm_hw_fence_queue *rx_queue; 
struct msm_hw_fence_queue *tx_queue; - u64 hash, ctx_id, seqno, timestamp, flags; + u64 hash, ctx_id, seqno, timestamp, flags, client_data; u32 *read_ptr, error; int client_id, i; struct msm_hw_fence_queue_payload *read_ptr_payload; @@ -595,12 +595,13 @@ static ssize_t hw_fence_dbg_dump_queues_wr(struct file *file, const char __user seqno = readq_relaxed(&read_ptr_payload->seqno); hash = readq_relaxed(&read_ptr_payload->hash); flags = readq_relaxed(&read_ptr_payload->flags); + client_data = readq_relaxed(&read_ptr_payload->client_data); error = readl_relaxed(&read_ptr_payload->error); timestamp = (u64)readl_relaxed(&read_ptr_payload->timestamp_lo) | ((u64)readl_relaxed(&read_ptr_payload->timestamp_hi) << 32); - HWFNC_DBG_L("rx[%d]: hash:%d ctx:%llu seqno:%llu f:%llu err:%u time:%llu\n", - i, hash, ctx_id, seqno, flags, error, timestamp); + HWFNC_DBG_L("rx[%d]: hash:%d ctx:%llu seqno:%llu f:%llu d:%llu err:%u time:%llu\n", + i, hash, ctx_id, seqno, flags, client_data, error, timestamp); } HWFNC_DBG_L("-------TX QUEUE------\n"); @@ -855,7 +856,7 @@ static ssize_t hw_fence_dbg_create_join_fence(struct file *file, /* Write to Tx queue */ hw_fence_update_queue(drv_data, hw_fence_client, client_info_src->dma_context, - hw_fence_dbg_seqno + i, hash, 0, 0, + hw_fence_dbg_seqno + i, hash, 0, 0, 0, HW_FENCE_TX_QUEUE - 1); } diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index dd9fa3c348..b1011967ec 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -208,6 +208,7 @@ int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, payload->seqno = readq_relaxed(&read_ptr_payload->seqno); payload->hash = readq_relaxed(&read_ptr_payload->hash); payload->flags = readq_relaxed(&read_ptr_payload->flags); + payload->client_data = readq_relaxed(&read_ptr_payload->client_data); payload->error = readl_relaxed(&read_ptr_payload->error); /* update the read index */ @@ -226,7 +227,7 @@ int 
hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, */ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, u64 seqno, u64 hash, - u64 flags, u32 error, int queue_type) + u64 flags, u64 client_data, u32 error, int queue_type) { struct msm_hw_fence_hfi_queue_header *hfi_header; struct msm_hw_fence_queue *queue; @@ -332,6 +333,7 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, writeq_relaxed(seqno, &write_ptr_payload->seqno); writeq_relaxed(hash, &write_ptr_payload->hash); writeq_relaxed(flags, &write_ptr_payload->flags); + writeq_relaxed(client_data, &write_ptr_payload->client_data); writel_relaxed(error, &write_ptr_payload->error); timestamp = hw_fence_get_qtime(drv_data); writel_relaxed(timestamp, &write_ptr_payload->timestamp_lo); @@ -726,6 +728,8 @@ static void _cleanup_hw_fence(struct msm_hw_fence *hw_fence) for (i = 0; i < MSM_HW_FENCE_MAX_JOIN_PARENTS; i++) hw_fence->parent_list[i] = HW_FENCE_INVALID_PARENT_FENCE; + + memset(hw_fence->client_data, 0, sizeof(hw_fence->client_data)); } /* This function must be called with the hw fence lock */ @@ -1076,7 +1080,7 @@ struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data, static void _fence_ctl_signal(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash, - u64 flags, u32 error) + u64 flags, u64 client_data, u32 error) { u32 tx_client_id = drv_data->ipcc_client_pid; /* phys id for tx client */ u32 rx_client_id = hw_fence_client->ipc_client_vid; /* virt id for rx client */ @@ -1086,7 +1090,7 @@ static void _fence_ctl_signal(struct hw_fence_driver_data *drv_data, /* Write to Rx queue */ if (hw_fence_client->update_rxq) hw_fence_update_queue(drv_data, hw_fence_client, hw_fence->ctx_id, - hw_fence->seq_id, hash, flags, error, HW_FENCE_RX_QUEUE - 1); + hw_fence->seq_id, hash, flags, client_data, error, HW_FENCE_RX_QUEUE - 1); /* 
Signal the hw fence now */ if (hw_fence_client->send_ipc) @@ -1151,7 +1155,7 @@ static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, struct dma_fence_array *array, - u64 *hash_join_fence) + u64 *hash_join_fence, u64 client_data) { struct msm_hw_fence *join_fence; struct msm_hw_fence *hw_fence_child; @@ -1159,6 +1163,16 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, bool signal_join_fence = false; u64 hash; int i, ret = 0; + enum hw_fence_client_data_id data_id; + + if (client_data) { + data_id = hw_fence_get_client_data_id(hw_fence_client->client_id); + if (data_id >= HW_FENCE_MAX_CLIENTS_WITH_DATA) { + HWFNC_ERR("Populating non-zero client_data:%llu with invalid client:%d\n", + client_data, hw_fence_client->client_id); + return -EINVAL; + } + } /* * Create join fence from the join-fences table, @@ -1248,11 +1262,15 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0); /* unlock */ } + if (client_data) + join_fence->client_data[data_id] = client_data; + /* all fences were signaled, signal client now */ if (signal_join_fence) { /* signal the join hw fence */ - _fence_ctl_signal(drv_data, hw_fence_client, join_fence, *hash_join_fence, 0, 0); + _fence_ctl_signal(drv_data, hw_fence_client, join_fence, *hash_join_fence, 0, 0, + client_data); set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &array->base.flags); /* @@ -1275,9 +1293,19 @@ error_array: int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, struct dma_fence *fence, struct msm_hw_fence_client *hw_fence_client, u64 context, - u64 seqno, u64 *hash) + u64 seqno, u64 *hash, u64 client_data) { struct msm_hw_fence *hw_fence; + enum hw_fence_client_data_id data_id; + + if (client_data) { + data_id = hw_fence_get_client_data_id(hw_fence_client->client_id); + if 
(data_id >= HW_FENCE_MAX_CLIENTS_WITH_DATA) { + HWFNC_ERR("Populating non-zero client_data:%llu with invalid client:%d\n", + client_data, hw_fence_client->client_id); + return -EINVAL; + } + } /* find the hw fence within the table */ hw_fence = msm_hw_fence_find(drv_data, hw_fence_client, context, seqno, hash); @@ -1292,6 +1320,8 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, hw_fence->wait_client_mask |= BIT(hw_fence_client->client_id); hw_fence->fence_wait_time = hw_fence_get_qtime(drv_data); hw_fence->debug_refcount++; + if (client_data) + hw_fence->client_data[data_id] = client_data; /* update memory for the table update */ wmb(); @@ -1302,7 +1332,7 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, if (hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL) { if (fence != NULL) set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &fence->flags); - _fence_ctl_signal(drv_data, hw_fence_client, hw_fence, *hash, 0, 0); + _fence_ctl_signal(drv_data, hw_fence_client, hw_fence, *hash, 0, client_data, 0); } return 0; @@ -1310,7 +1340,7 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, int hw_fence_process_fence(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, - struct dma_fence *fence, u64 *hash) + struct dma_fence *fence, u64 *hash, u64 client_data) { int ret = 0; @@ -1325,7 +1355,7 @@ int hw_fence_process_fence(struct hw_fence_driver_data *drv_data, } ret = hw_fence_register_wait_client(drv_data, fence, hw_fence_client, fence->context, - fence->seqno, hash); + fence->seqno, hash, client_data); if (ret) HWFNC_ERR("Error registering for wait client:%d\n", hw_fence_client->client_id); @@ -1336,16 +1366,22 @@ static void _signal_all_wait_clients(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hw_fence, u64 hash, int error) { enum hw_fence_client_id wait_client_id; + enum hw_fence_client_data_id data_id; struct msm_hw_fence_client *hw_fence_wait_client; + u64 client_data 
= 0; /* signal with an error all the waiting clients for this fence */ for (wait_client_id = 0; wait_client_id < HW_FENCE_CLIENT_MAX; wait_client_id++) { if (hw_fence->wait_client_mask & BIT(wait_client_id)) { hw_fence_wait_client = drv_data->clients[wait_client_id]; + data_id = hw_fence_get_client_data_id(wait_client_id); + + if (data_id < HW_FENCE_MAX_CLIENTS_WITH_DATA) + client_data = hw_fence->client_data[data_id]; if (hw_fence_wait_client) _fence_ctl_signal(drv_data, hw_fence_wait_client, hw_fence, - hash, 0, error); + hash, 0, client_data, error); } } } @@ -1389,3 +1425,25 @@ int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, skip_destroy: return ret; } + +enum hw_fence_client_data_id hw_fence_get_client_data_id(enum hw_fence_client_id client_id) +{ + enum hw_fence_client_data_id data_id; + + switch (client_id) { + case HW_FENCE_CLIENT_ID_CTX0: + data_id = HW_FENCE_CLIENT_DATA_ID_CTX0; + break; + case HW_FENCE_CLIENT_ID_VAL0: + data_id = HW_FENCE_CLIENT_DATA_ID_VAL0; + break; + case HW_FENCE_CLIENT_ID_VAL1: + data_id = HW_FENCE_CLIENT_DATA_ID_VAL1; + break; + default: + data_id = HW_FENCE_MAX_CLIENTS_WITH_DATA; + break; + } + + return data_id; +} diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index ee34367a54..02fe414f19 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -277,6 +277,7 @@ int msm_hw_fence_wait_update_v2(void *client_handle, struct msm_hw_fence_client *hw_fence_client; struct dma_fence_array *array; int i, ret = 0; + enum hw_fence_client_data_id data_id; if (IS_ERR_OR_NULL(client_handle) || !fence_list || !*fence_list) { HWFNC_ERR("Invalid data\n"); @@ -289,19 +290,28 @@ int msm_hw_fence_wait_update_v2(void *client_handle, } hw_fence_client = (struct msm_hw_fence_client *)client_handle; + data_id = hw_fence_get_client_data_id(hw_fence_client->client_id); + if (client_data_list && data_id >= HW_FENCE_MAX_CLIENTS_WITH_DATA) { + HWFNC_ERR("Populating non-NULL client_data_list 
with unsupported client id:%d\n", + hw_fence_client->client_id); + return -EINVAL; + } HWFNC_DBG_H("+\n"); /* Process all the list of fences */ for (i = 0; i < num_fences; i++) { struct dma_fence *fence = fence_list[i]; - u64 hash; + u64 hash, client_data = 0; + + if (client_data_list) + client_data = client_data_list[i]; /* Process a Fence-Array */ array = to_dma_fence_array(fence); if (array) { ret = hw_fence_process_fence_array(hw_fence_drv_data, hw_fence_client, - array, &hash); + array, &hash, client_data); if (ret) { HWFNC_ERR("Failed to process FenceArray\n"); return ret; @@ -309,7 +319,7 @@ int msm_hw_fence_wait_update_v2(void *client_handle, } else { /* Process individual Fence */ ret = hw_fence_process_fence(hw_fence_drv_data, hw_fence_client, fence, - &hash); + &hash, client_data); if (ret) { HWFNC_ERR("Failed to process Fence\n"); return ret; @@ -394,7 +404,7 @@ int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 erro hw_fence_update_queue(hw_fence_drv_data, hw_fence_client, hw_fence_drv_data->hw_fences_tbl[handle].ctx_id, hw_fence_drv_data->hw_fences_tbl[handle].seq_id, handle, - flags, error, HW_FENCE_TX_QUEUE - 1); + flags, 0, error, HW_FENCE_TX_QUEUE - 1); return 0; } From b2efa8bc8bb0e2cdaab0585f39dfd8c30efc9aa0 Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 19 Oct 2022 17:15:03 -0700 Subject: [PATCH 041/166] mm-drivers: hw_fence: add dtsi-based allocation of client queues Update hw fence driver to support configurable parameters for each client type, which can be set up through device-tree. This allows configuring number of queues (e.g. only Tx Queue or both Rx and Tx Queues), number of entries per client queue, and number of sub-clients for each client-type. 
Change-Id: I2d8f84ff2b7eb5322f9ca661cfd8f6a291db7b38 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 28 +++- hw_fence/src/hw_fence_drv_priv.c | 21 ++- hw_fence/src/hw_fence_drv_utils.c | 198 ++++++++++++++++++++++++--- hw_fence/src/msm_hw_fence.c | 8 ++ 4 files changed, 221 insertions(+), 34 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index aa93a6131e..29f1fb00c9 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -35,8 +35,8 @@ #define HW_FENCE_HFI_CTRL_HEADERS_SIZE (HW_FENCE_HFI_TABLE_HEADER_SIZE + \ (HW_FENCE_HFI_QUEUE_HEADER_SIZE * HW_FENCE_CTRL_QUEUES)) -#define HW_FENCE_HFI_CLIENT_HEADERS_SIZE (HW_FENCE_HFI_TABLE_HEADER_SIZE + \ - (HW_FENCE_HFI_QUEUE_HEADER_SIZE * HW_FENCE_CLIENT_QUEUES)) +#define HW_FENCE_HFI_CLIENT_HEADERS_SIZE(queues_num) (HW_FENCE_HFI_TABLE_HEADER_SIZE + \ + (HW_FENCE_HFI_QUEUE_HEADER_SIZE * queues_num)) /* * Max Payload size is the bigest size of the message that we can have in the CTRL queue @@ -230,6 +230,22 @@ struct msm_hw_fence_dbg_data { u64 lock_wake_cnt; }; +/** + * struct hw_fence_client_queue_size_desc - Structure holding client queue properties for a client. 
+ * + * @queues_num: number of client queues + * @queue_entries: number of queue entries per client queue + * @mem_size: size of memory allocated for client queues + * @start_offset: start offset of client queue memory region, from beginning of carved-out memory + * allocation for hw fence driver + */ +struct hw_fence_client_queue_size_desc { + u32 queues_num; + u32 queue_entries; + u32 mem_size; + u32 start_offset; +}; + /** * struct hw_fence_driver_data - Structure holding internal hw-fence driver data * @@ -240,8 +256,7 @@ struct msm_hw_fence_dbg_data { * @hw_fence_queue_entries: total number of entries that can be available in the queue * @hw_fence_ctrl_queue_size: size of the ctrl queue for the payload * @hw_fence_mem_ctrl_queues_size: total size of ctrl queues, including: header + rxq + txq - * @hw_fence_client_queue_size: size of the client queue for the payload - * @hw_fence_mem_clients_queues_size: total size of client queues, including: header + rxq + txq + * @hw_fence_client_queue_size: descriptors of client queue properties for each hw fence client * @hw_fences_tbl: pointer to the hw-fences table * @hw_fences_tbl_cnt: number of elements in the hw-fence table * @client_lock_tbl: pointer to the per-client locks table @@ -257,6 +272,7 @@ struct msm_hw_fence_dbg_data { * @peer_name: peer name for this carved-out memory * @rm_nb: hyp resource manager notifier * @memparcel: memparcel for the allocated memory + * @used_mem_size: total memory size of global table, lock region, and ctrl and client queues * @db_label: doorbell label * @rx_dbl: handle to the Rx doorbell * @debugfs_data: debugfs info @@ -291,8 +307,7 @@ struct hw_fence_driver_data { u32 hw_fence_ctrl_queue_size; u32 hw_fence_mem_ctrl_queues_size; /* client queues */ - u32 hw_fence_client_queue_size; - u32 hw_fence_mem_clients_queues_size; + struct hw_fence_client_queue_size_desc hw_fence_client_queue_size[HW_FENCE_CLIENT_MAX]; /* HW Fences Table VA */ struct msm_hw_fence *hw_fences_tbl; @@ -316,6 
+331,7 @@ struct hw_fence_driver_data { u32 peer_name; struct notifier_block rm_nb; u32 memparcel; + u32 used_mem_size; /* doorbell */ u32 db_label; diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 89dbc666a6..7dc3b69c56 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -46,8 +46,14 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, payload_size = HW_FENCE_CTRL_QUEUE_PAYLOAD; break; case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: - headers_size = HW_FENCE_HFI_CLIENT_HEADERS_SIZE; - queue_size = drv_data->hw_fence_client_queue_size; + if (client_id >= HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("Invalid client_id: %d\n", client_id); + return -EINVAL; + } + + headers_size = HW_FENCE_HFI_CLIENT_HEADERS_SIZE(queues_num); + queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * + drv_data->hw_fence_client_queue_size[client_id].queue_entries; payload_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD; break; default: @@ -244,8 +250,10 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, u64 timestamp; int ret = 0; - if (queue_type >= HW_FENCE_CLIENT_QUEUES) { - HWFNC_ERR("Invalid queue type:%s\n", queue_type); + if (queue_type >= + drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].queues_num) { + HWFNC_ERR("Invalid queue type:%s client_id:%d\n", queue_type, + hw_fence_client->client_id); return -EINVAL; } @@ -526,7 +534,8 @@ int hw_fence_alloc_client_resources(struct hw_fence_driver_data *drv_data, /* Init client queues */ ret = init_hw_fences_queues(drv_data, HW_FENCE_MEM_RESERVE_CLIENT_QUEUE, &hw_fence_client->mem_descriptor, hw_fence_client->queues, - HW_FENCE_CLIENT_QUEUES, hw_fence_client->client_id); + drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].queues_num, + hw_fence_client->client_id); if (ret) { HWFNC_ERR("Failure to init the queue for client:%d\n", hw_fence_client->client_id); @@ -549,7 +558,7 @@ int hw_fence_init_controller_signal(struct 
hw_fence_driver_data *drv_data, /* * Initialize IPCC Signals for this client * - * NOTE: Fore each Client HW-Core, the client drivers might be the ones making + * NOTE: For each Client HW-Core, the client drivers might be the ones making * it's own initialization (in case that any hw-sequence must be enforced), * however, if that is not the case, any per-client ipcc init to enable the * signaling, can go here. diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index c8eab917dd..a9a391982e 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -15,6 +15,73 @@ #include "hw_fence_drv_ipc.h" #include "hw_fence_drv_debug.h" +/** + * MAX_CLIENT_QUEUE_MEM_SIZE: + * Maximum memory size for client queues of a hw fence client. + */ +#define MAX_CLIENT_QUEUE_MEM_SIZE 0x100000 + +/** + * HW_FENCE_MAX_CLIENT_TYPE: + * Total number of client types (GFX, DPU, VAL) + */ +#define HW_FENCE_MAX_CLIENT_TYPE 3 + +/* Maximum number of clients for each client type */ +#define HW_FENCE_CLIENT_TYPE_MAX_GPU 1 +#define HW_FENCE_CLIENT_TYPE_MAX_DPU 6 +#define HW_FENCE_CLIENT_TYPE_MAX_VAL 7 + +/** + * struct hw_fence_client_type_desc - Structure holding client type properties, including static + * properties and client queue properties read from device-tree. + * + * @name: name of client type, used to parse properties from device-tree + * @init_id: initial client_id for given client type within the 'hw_fence_client_id' enum, e.g. 
+ * HW_FENCE_CLIENT_ID_CTL0 for DPU clients + * @max_clients_num: maximum number of clients of given client type + * @clients_num: number of clients of given client type + * @queues_num: number of queues per client of given client type; either one (for only Tx Queue) or + * two (for both Tx and Rx Queues) + * @queue_entries: number of entries per client queue of given client type + * @mem_size: size of memory allocated for client queue(s) per client + */ +struct hw_fence_client_type_desc { + char *name; + enum hw_fence_client_id init_id; + u32 max_clients_num; + u32 clients_num; + u32 queues_num; + u32 queue_entries; + u32 mem_size; +}; + +/** + * struct hw_fence_client_types - Table describing all supported client types, used to parse + * device-tree properties related to client queue size. + * + * The fields name, init_id, and max_clients_num are constants. Default values for clients_num and + * queues_num are provided in this table, and clients_num, queues_num, and queue_entries can be read + * from device-tree. + * + * If a value for queue entries is not parsed for the client type, then the default number of client + * queue entries (parsed from device-tree) is used. + * + * Notes: + * 1. Client types must be in the same order as client_ids within the enum 'hw_fence_client_id'. + * 2. Each HW Fence client ID must be described by one of the client types in this table. + * 3. A new client type must set: name, init_id, max_clients_num, clients_num, and queues_num. + * 4. HW_FENCE_MAX_CLIENT_TYPE must be incremented for new client types. 
+ */ +struct hw_fence_client_type_desc hw_fence_client_types[HW_FENCE_MAX_CLIENT_TYPE] = { + {"gpu", HW_FENCE_CLIENT_ID_CTX0, HW_FENCE_CLIENT_TYPE_MAX_GPU, HW_FENCE_CLIENT_TYPE_MAX_GPU, + HW_FENCE_CLIENT_QUEUES, 0, 0}, + {"dpu", HW_FENCE_CLIENT_ID_CTL0, HW_FENCE_CLIENT_TYPE_MAX_DPU, HW_FENCE_CLIENT_TYPE_MAX_DPU, + HW_FENCE_CLIENT_QUEUES, 0, 0}, + {"val", HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_TYPE_MAX_VAL, HW_FENCE_CLIENT_TYPE_MAX_VAL, + HW_FENCE_CLIENT_QUEUES, 0, 0}, +}; + static void _lock(uint64_t *wait) { #if defined(__aarch64__) @@ -399,6 +466,11 @@ int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *drv_data) return -ENXIO; } drv_data->size = resource_size(&drv_data->res); + if (drv_data->size < drv_data->used_mem_size) { + HWFNC_ERR("0x%x size of carved-out memory region is less than required size:0x%x\n", + drv_data->size, drv_data->used_mem_size); + return -ENOMEM; + } HWFNC_DBG_INIT("io_mem_base:0x%x start:0x%x end:0x%x size:0x%x name:%s\n", drv_data->io_mem_base, drv_data->res.start, @@ -469,12 +541,17 @@ int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data, goto exit; } - start_offset = PAGE_ALIGN(drv_data->hw_fence_mem_ctrl_queues_size + - HW_FENCE_MEM_LOCKS_SIZE + - drv_data->hw_fence_mem_fences_table_size) + - ((client_id - 1) * drv_data->hw_fence_mem_clients_queues_size); - *size = drv_data->hw_fence_mem_clients_queues_size; + start_offset = drv_data->hw_fence_client_queue_size[client_id].start_offset; + *size = drv_data->hw_fence_client_queue_size[client_id].mem_size; + /* + * If this error occurs when client should be valid, check that support for this + * client has been configured in device-tree properties. 
+ */ + if (!*size) { + HWFNC_ERR("invalid client_id:%d not reserved client queue\n", client_id); + ret = -EINVAL; + } break; default: HWFNC_ERR("Invalid mem reserve type:%d\n", type); @@ -501,6 +578,95 @@ exit: return ret; } +static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_data, + struct hw_fence_client_type_desc *desc) +{ + char name[31]; + u32 tmp[3]; + u32 queue_size; + int ret; + + /* parse client queue property from device-tree */ + snprintf(name, sizeof(name), "qcom,hw-fence-client-type-%s", desc->name); + ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, 3); + if (ret) { + HWFNC_DBG_INIT("missing %s client queue entry or invalid ret:%d\n", desc->name, + ret); + desc->queue_entries = drv_data->hw_fence_queue_entries; + } else { + desc->clients_num = tmp[0]; + desc->queues_num = tmp[1]; + desc->queue_entries = tmp[2]; + } + + if (desc->clients_num > desc->max_clients_num || !desc->queues_num || + desc->queues_num > HW_FENCE_CLIENT_QUEUES || !desc->queue_entries) { + HWFNC_ERR("%s invalid dt: clients_num:%lu queues_num:%lu, queue_entries:%lu\n", + desc->name, desc->clients_num, desc->queues_num, desc->queue_entries); + return -EINVAL; + } + + /* compute mem_size */ + if (desc->queue_entries >= U32_MAX / HW_FENCE_CLIENT_QUEUE_PAYLOAD) { + HWFNC_ERR("%s client queue entries:%lu will overflow client queue size\n", + desc->name, desc->queue_entries); + return -EINVAL; + } + + queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * desc->queue_entries; + if (queue_size >= ((U32_MAX & PAGE_MASK) - + HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num)) / desc->queues_num) { + HWFNC_ERR("%s client queue size:%lu will overflow client queue mem size\n", + desc->name, queue_size); + return -EINVAL; + } + + desc->mem_size = PAGE_ALIGN(HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) + + (queue_size * desc->queues_num)); + + if (desc->mem_size > MAX_CLIENT_QUEUE_MEM_SIZE) { + HWFNC_ERR("%s client queue mem_size:%lu greater than max 
client queue size:%lu\n", + desc->name, desc->mem_size, MAX_CLIENT_QUEUE_MEM_SIZE); + return -EINVAL; + } + + HWFNC_DBG_INIT("%s: clients=%lu q_num=%lu q_entries=%lu mem_sz=%lu\n", desc->name, + desc->clients_num, desc->queues_num, desc->queue_entries, desc->mem_size); + + return 0; +} + +static int _parse_client_queue_dt_props(struct hw_fence_driver_data *drv_data) +{ + struct hw_fence_client_type_desc *desc; + int i, j, ret; + u32 start_offset; + + start_offset = PAGE_ALIGN(drv_data->hw_fence_mem_ctrl_queues_size + + HW_FENCE_MEM_LOCKS_SIZE + drv_data->hw_fence_mem_fences_table_size); + for (i = 0; i < HW_FENCE_MAX_CLIENT_TYPE; i++) { + desc = &hw_fence_client_types[i]; + ret = _parse_client_queue_dt_props_indv(drv_data, desc); + if (ret) { + HWFNC_ERR("failed to initialize %s client queue size properties\n", + desc->name); + return ret; + } + + /* initialize client queue size desc for each client */ + for (j = 0; j < desc->clients_num; j++) { + drv_data->hw_fence_client_queue_size[desc->init_id + j] = + (struct hw_fence_client_queue_size_desc) + {desc->queues_num, desc->queue_entries, desc->mem_size, + start_offset}; + start_offset += desc->mem_size; + } + } + drv_data->used_mem_size = start_offset; + + return 0; +} + int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data) { int ret; @@ -549,29 +715,17 @@ int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data) /* clients queues init */ - if (drv_data->hw_fence_queue_entries >= U32_MAX / HW_FENCE_CLIENT_QUEUE_PAYLOAD) { - HWFNC_ERR("queue entries:%lu will overflow client queue size\n", - drv_data->hw_fence_queue_entries); + ret = _parse_client_queue_dt_props(drv_data); + if (ret) { + HWFNC_ERR("failed to parse client queue properties\n"); return -EINVAL; } - drv_data->hw_fence_client_queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * - drv_data->hw_fence_queue_entries; - - if (drv_data->hw_fence_client_queue_size >= ((U32_MAX & PAGE_MASK) - - HW_FENCE_HFI_CLIENT_HEADERS_SIZE) / 
HW_FENCE_CLIENT_QUEUES) { - HWFNC_ERR("queue size:%lu will overflow client queue mem size\n", - drv_data->hw_fence_client_queue_size); - return -EINVAL; - } - drv_data->hw_fence_mem_clients_queues_size = PAGE_ALIGN(HW_FENCE_HFI_CLIENT_HEADERS_SIZE + - (HW_FENCE_CLIENT_QUEUES * drv_data->hw_fence_client_queue_size)); HWFNC_DBG_INIT("table: entries=%lu mem_size=%lu queue: entries=%lu\b", drv_data->hw_fence_table_entries, drv_data->hw_fence_mem_fences_table_size, drv_data->hw_fence_queue_entries); - HWFNC_DBG_INIT("ctrl queue: size=%lu mem_size=%lu clients queues: size=%lu mem_size=%lu\b", - drv_data->hw_fence_ctrl_queue_size, drv_data->hw_fence_mem_ctrl_queues_size, - drv_data->hw_fence_client_queue_size, drv_data->hw_fence_mem_clients_queues_size); + HWFNC_DBG_INIT("ctrl queue: size=%lu mem_size=%lu\b", + drv_data->hw_fence_ctrl_queue_size, drv_data->hw_fence_mem_ctrl_queues_size); return 0; } diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 02fe414f19..5085592def 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -74,6 +74,14 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id, } hw_fence_client->update_rxq = hw_fence_ipcc_needs_rxq_update(hw_fence_drv_data, client_id); + if (hw_fence_client->update_rxq && + hw_fence_drv_data->hw_fence_client_queue_size[client_id].queues_num < + HW_FENCE_CLIENT_QUEUES) { + HWFNC_ERR("Cannot update rx queue for tx queue-only client:%d\n", client_id); + ret = -EINVAL; + goto error; + } + hw_fence_client->send_ipc = hw_fence_ipcc_needs_ipc_irq(hw_fence_drv_data, client_id); /* Alloc Client HFI Headers and Queues */ From 368ae729919b911174c38f214b2d399d20d236d5 Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 19 Oct 2022 17:15:03 -0700 Subject: [PATCH 042/166] mm-drivers: hw_fence: add support for ipe, vpu, and ife clients Update hw fence driver to support new clients with large number of possible sub-clients, which can be configured in device-tree. 
Add client queues support for ipe, vpu, and ife clients. Change-Id: I6e274819c1c154af3ea977d1d09e419d86f6fe8e Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_debug.h | 2 +- hw_fence/include/hw_fence_drv_priv.h | 20 ++-- hw_fence/include/hw_fence_drv_utils.h | 17 ++++ hw_fence/src/hw_fence_drv_priv.c | 22 +++- hw_fence/src/hw_fence_drv_utils.c | 140 ++++++++++++++++++++++++-- hw_fence/src/hw_fence_ioctl.c | 8 +- hw_fence/src/msm_hw_fence.c | 43 ++++++-- 7 files changed, 218 insertions(+), 34 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_debug.h b/hw_fence/include/hw_fence_drv_debug.h index bfb654e603..a1d66e0cdd 100644 --- a/hw_fence/include/hw_fence_drv_debug.h +++ b/hw_fence/include/hw_fence_drv_debug.h @@ -85,7 +85,7 @@ struct hw_fence_out_clients_map { * The index of this struct must match the enum hw_fence_client_id */ static const struct hw_fence_out_clients_map - dbg_out_clients_signal_map_no_dpu[HW_FENCE_CLIENT_MAX] = { + dbg_out_clients_signal_map_no_dpu[HW_FENCE_CLIENT_ID_VAL6 + 1] = { {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 0}, /* CTRL_LOOPBACK */ {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0}, /* CTX0 */ {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 2}, /* CTL0 */ diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 29f1fb00c9..2645fbc638 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -48,8 +48,8 @@ #define HW_FENCE_CTRL_QUEUE_PAYLOAD HW_FENCE_CTRL_QUEUE_MAX_PAYLOAD_SIZE #define HW_FENCE_CLIENT_QUEUE_PAYLOAD (sizeof(struct msm_hw_fence_queue_payload)) -/* Locks area for all the clients */ -#define HW_FENCE_MEM_LOCKS_SIZE (sizeof(u64) * (HW_FENCE_CLIENT_MAX - 1)) +/* Locks area for all clients with RxQ */ +#define HW_FENCE_MEM_LOCKS_SIZE(rxq_clients_num) (sizeof(u64) * rxq_clients_num) #define HW_FENCE_TX_QUEUE 1 #define HW_FENCE_RX_QUEUE 2 @@ -165,7 +165,9 @@ enum payload_type 
{ /** * struct msm_hw_fence_client - Structure holding the per-Client allocated resources. - * @client_id: id of the client + * @client_id: internal client_id used within HW fence driver; index into the clients struct + * @client_id_ext: external client_id, equal to client_id except for clients with configurable + * number of sub-clients (e.g. ife clients) * @mem_descriptor: hfi header memory descriptor * @queues: queues descriptor * @ipc_signal_id: id of the signal to be triggered for this client @@ -178,6 +180,7 @@ enum payload_type { */ struct msm_hw_fence_client { enum hw_fence_client_id client_id; + enum hw_fence_client_id client_id_ext; struct msm_hw_fence_mem_addr mem_descriptor; struct msm_hw_fence_queue queues[HW_FENCE_CLIENT_QUEUES]; int ipc_signal_id; @@ -257,6 +260,8 @@ struct hw_fence_client_queue_size_desc { * @hw_fence_ctrl_queue_size: size of the ctrl queue for the payload * @hw_fence_mem_ctrl_queues_size: total size of ctrl queues, including: header + rxq + txq * @hw_fence_client_queue_size: descriptors of client queue properties for each hw fence client + * @rxq_clients_num: number of supported hw fence clients with rxq (configured based on device-tree) + * @clients_num: number of supported hw fence clients (configured based on device-tree) * @hw_fences_tbl: pointer to the hw-fences table * @hw_fences_tbl_cnt: number of elements in the hw-fence table * @client_lock_tbl: pointer to the per-client locks table @@ -290,7 +295,7 @@ struct hw_fence_client_queue_size_desc { * @ctl_start_size: size of the ctl_start registers of the display hw (platforms with no dpu-ipc) * @client_id_mask: bitmask for tracking registered client_ids * @clients_register_lock: lock to synchronize clients registration and deregistration - * @msm_hw_fence_client: table with the handles of the registered clients + * @clients: table with the handles of the registered clients; size is equal to clients_num * @vm_ready: flag to indicate if vm has been initialized * 
@ipcc_dpu_initialized: flag to indicate if dpu hw is initialized */ @@ -307,7 +312,10 @@ struct hw_fence_driver_data { u32 hw_fence_ctrl_queue_size; u32 hw_fence_mem_ctrl_queues_size; /* client queues */ - struct hw_fence_client_queue_size_desc hw_fence_client_queue_size[HW_FENCE_CLIENT_MAX]; + struct hw_fence_client_queue_size_desc *hw_fence_client_queue_size; + struct hw_fence_client_type_desc *hw_fence_client_types; + u32 rxq_clients_num; + u32 clients_num; /* HW Fences Table VA */ struct msm_hw_fence *hw_fences_tbl; @@ -366,7 +374,7 @@ struct hw_fence_driver_data { struct mutex clients_register_lock; /* table with registered client handles */ - struct msm_hw_fence_client *clients[HW_FENCE_CLIENT_MAX]; + struct msm_hw_fence_client **clients; bool vm_ready; #ifdef HW_DPU_IPCC diff --git a/hw_fence/include/hw_fence_drv_utils.h b/hw_fence/include/hw_fence_drv_utils.h index 2ef6df0fe9..ac8b504a52 100644 --- a/hw_fence/include/hw_fence_drv_utils.h +++ b/hw_fence/include/hw_fence_drv_utils.h @@ -111,4 +111,21 @@ int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash, u32 reset_flags); +/** + * hw_fence_utils_get_client_id_priv() - Gets the index into clients struct within hw fence driver + * from the client_id used externally + * + * Performs a 1-to-1 mapping for all client IDs less than HW_FENCE_MAX_STATIC_CLIENTS_INDEX, + * otherwise consolidates client IDs of clients with configurable number of sub-clients. Fails if + * provided with client IDs for such clients when support for those clients is not configured in + * device-tree. 
+ * + * @drv_data: hw fence driver data + * @client_id: external client_id to get internal client_id for + * + * Returns client_id < drv_data->clients_num if success, otherwise returns HW_FENCE_CLIENT_MAX + */ +enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver_data *drv_data, + enum hw_fence_client_id client_id); + #endif /* __HW_FENCE_DRV_UTILS_H */ diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 7dc3b69c56..ce7546de69 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -46,7 +46,7 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, payload_size = HW_FENCE_CTRL_QUEUE_PAYLOAD; break; case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: - if (client_id >= HW_FENCE_CLIENT_MAX) { + if (client_id >= drv_data->clients_num) { HWFNC_ERR("Invalid client_id: %d\n", client_id); return -EINVAL; } @@ -563,7 +563,7 @@ int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, * however, if that is not the case, any per-client ipcc init to enable the * signaling, can go here. */ - switch (hw_fence_client->client_id) { + switch ((int)hw_fence_client->client_id) { case HW_FENCE_CLIENT_ID_CTX0: /* nothing to initialize for gpu client */ break; @@ -596,6 +596,16 @@ int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, } #endif /* HW_DPU_IPCC */ break; + case HW_FENCE_CLIENT_ID_IPE: + /* nothing to initialize for IPE client */ + break; + case HW_FENCE_CLIENT_ID_VPU: + /* nothing to initialize for VPU client */ + break; + case HW_FENCE_CLIENT_ID_IFE0 ... 
HW_FENCE_CLIENT_ID_IFE7 + + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - 1: + /* nothing to initialize for IFE clients */ + break; default: HWFNC_ERR("Unexpected client:%d\n", hw_fence_client->client_id); ret = -EINVAL; @@ -1397,7 +1407,7 @@ static void _signal_all_wait_clients(struct hw_fence_driver_data *drv_data, u64 client_data = 0; /* signal with an error all the waiting clients for this fence */ - for (wait_client_id = 0; wait_client_id < HW_FENCE_CLIENT_MAX; wait_client_id++) { + for (wait_client_id = 0; wait_client_id <= drv_data->rxq_clients_num; wait_client_id++) { if (hw_fence->wait_client_mask & BIT(wait_client_id)) { hw_fence_wait_client = drv_data->clients[wait_client_id]; data_id = hw_fence_get_client_data_id(wait_client_id); @@ -1466,6 +1476,12 @@ enum hw_fence_client_data_id hw_fence_get_client_data_id(enum hw_fence_client_id case HW_FENCE_CLIENT_ID_VAL1: data_id = HW_FENCE_CLIENT_DATA_ID_VAL1; break; + case HW_FENCE_CLIENT_ID_IPE: + data_id = HW_FENCE_CLIENT_DATA_ID_IPE; + break; + case HW_FENCE_CLIENT_ID_VPU: + data_id = HW_FENCE_CLIENT_DATA_ID_VPU; + break; default: data_id = HW_FENCE_MAX_CLIENTS_WITH_DATA; break; diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index a9a391982e..e1857bb962 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -22,15 +22,49 @@ #define MAX_CLIENT_QUEUE_MEM_SIZE 0x100000 /** - * HW_FENCE_MAX_CLIENT_TYPE: - * Total number of client types (GFX, DPU, VAL) + * HW_FENCE_MAX_CLIENT_TYPE_STATIC: + * Total number of client types without configurable number of sub-clients (GFX, DPU, VAL, IPE, VPU) */ -#define HW_FENCE_MAX_CLIENT_TYPE 3 +#define HW_FENCE_MAX_CLIENT_TYPE_STATIC 5 + +/** + * HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE: + * Maximum number of client types with configurable number of sub-clients (e.g. 
IFE) + */ +#define HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE 8 + +/** + * HW_FENCE_MAX_CLIENT_TYPE: + * Total number of client types with and without configurable number of sub-clients + */ +#define HW_FENCE_MAX_CLIENT_TYPE (HW_FENCE_MAX_CLIENT_TYPE_STATIC + \ + HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE) + +/** + * HW_FENCE_MAX_STATIC_CLIENTS_INDEX: + * Maximum number of static clients, i.e. clients without configurable numbers of sub-clients + */ +#define HW_FENCE_MAX_STATIC_CLIENTS_INDEX HW_FENCE_CLIENT_ID_IFE0 + +/** + * HW_FENCE_MIN_RXQ_CLIENTS: + * Minimum number of static hw fence clients with rxq + */ +#define HW_FENCE_MIN_RXQ_CLIENTS HW_FENCE_CLIENT_ID_VAL6 + +/** + * HW_FENCE_MIN_RXQ_CLIENT_TYPE: + * Minimum number of static hw fence client types with rxq (GFX, DPU, VAL) + */ +#define HW_FENCE_MIN_RXQ_CLIENT_TYPE 3 /* Maximum number of clients for each client type */ #define HW_FENCE_CLIENT_TYPE_MAX_GPU 1 #define HW_FENCE_CLIENT_TYPE_MAX_DPU 6 #define HW_FENCE_CLIENT_TYPE_MAX_VAL 7 +#define HW_FENCE_CLIENT_TYPE_MAX_IPE 1 +#define HW_FENCE_CLIENT_TYPE_MAX_VPU 1 +#define HW_FENCE_CLIENT_TYPE_MAX_IFE 32 /** * struct hw_fence_client_type_desc - Structure holding client type properties, including static @@ -71,7 +105,8 @@ struct hw_fence_client_type_desc { * 1. Client types must be in the same order as client_ids within the enum 'hw_fence_client_id'. * 2. Each HW Fence client ID must be described by one of the client types in this table. * 3. A new client type must set: name, init_id, max_clients_num, clients_num, and queues_num. - * 4. HW_FENCE_MAX_CLIENT_TYPE must be incremented for new client types. + * 4. Either constant HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE or HW_FENCE_MAX_CLIENT_TYPE_STATIC must + * be incremented as appropriate for new client types. 
*/ struct hw_fence_client_type_desc hw_fence_client_types[HW_FENCE_MAX_CLIENT_TYPE] = { {"gpu", HW_FENCE_CLIENT_ID_CTX0, HW_FENCE_CLIENT_TYPE_MAX_GPU, HW_FENCE_CLIENT_TYPE_MAX_GPU, @@ -80,6 +115,18 @@ struct hw_fence_client_type_desc hw_fence_client_types[HW_FENCE_MAX_CLIENT_TYPE] HW_FENCE_CLIENT_QUEUES, 0, 0}, {"val", HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_TYPE_MAX_VAL, HW_FENCE_CLIENT_TYPE_MAX_VAL, HW_FENCE_CLIENT_QUEUES, 0, 0}, + {"ipe", HW_FENCE_CLIENT_ID_IPE, HW_FENCE_CLIENT_TYPE_MAX_IPE, 0, + HW_FENCE_CLIENT_QUEUES, 0, 0}, + {"vpu", HW_FENCE_CLIENT_ID_VPU, HW_FENCE_CLIENT_TYPE_MAX_VPU, 0, + HW_FENCE_CLIENT_QUEUES, 0, 0}, + {"ife0", HW_FENCE_CLIENT_ID_IFE0, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, + {"ife1", HW_FENCE_CLIENT_ID_IFE1, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, + {"ife2", HW_FENCE_CLIENT_ID_IFE2, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, + {"ife3", HW_FENCE_CLIENT_ID_IFE3, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, + {"ife4", HW_FENCE_CLIENT_ID_IFE4, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, + {"ife5", HW_FENCE_CLIENT_ID_IFE5, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, + {"ife6", HW_FENCE_CLIENT_ID_IFE6, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, + {"ife7", HW_FENCE_CLIENT_ID_IFE7, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, }; static void _lock(uint64_t *wait) @@ -527,15 +574,16 @@ int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data, case HW_FENCE_MEM_RESERVE_LOCKS_REGION: /* Locks region starts at the end of the ctrl queues */ start_offset = drv_data->hw_fence_mem_ctrl_queues_size; - *size = HW_FENCE_MEM_LOCKS_SIZE; + *size = HW_FENCE_MEM_LOCKS_SIZE(drv_data->rxq_clients_num); break; case HW_FENCE_MEM_RESERVE_TABLE: /* HW Fence table starts at the end of the Locks region */ - start_offset = drv_data->hw_fence_mem_ctrl_queues_size + HW_FENCE_MEM_LOCKS_SIZE; + start_offset = drv_data->hw_fence_mem_ctrl_queues_size + + HW_FENCE_MEM_LOCKS_SIZE(drv_data->rxq_clients_num); *size = drv_data->hw_fence_mem_fences_table_size; break; 
case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: - if (client_id >= HW_FENCE_CLIENT_MAX) { + if (client_id >= drv_data->clients_num) { HWFNC_ERR("unexpected client_id:%d\n", client_id); ret = -EINVAL; goto exit; @@ -641,9 +689,10 @@ static int _parse_client_queue_dt_props(struct hw_fence_driver_data *drv_data) struct hw_fence_client_type_desc *desc; int i, j, ret; u32 start_offset; + size_t size; + int configurable_clients_num = 0; - start_offset = PAGE_ALIGN(drv_data->hw_fence_mem_ctrl_queues_size + - HW_FENCE_MEM_LOCKS_SIZE + drv_data->hw_fence_mem_fences_table_size); + drv_data->rxq_clients_num = HW_FENCE_MIN_RXQ_CLIENTS; for (i = 0; i < HW_FENCE_MAX_CLIENT_TYPE; i++) { desc = &hw_fence_client_types[i]; ret = _parse_client_queue_dt_props_indv(drv_data, desc); @@ -653,12 +702,43 @@ static int _parse_client_queue_dt_props(struct hw_fence_driver_data *drv_data) return ret; } - /* initialize client queue size desc for each client */ + if (i >= HW_FENCE_MIN_RXQ_CLIENT_TYPE && + desc->queues_num == HW_FENCE_CLIENT_QUEUES) + drv_data->rxq_clients_num += desc->clients_num; + + if (i >= HW_FENCE_MAX_CLIENT_TYPE_STATIC) + configurable_clients_num += desc->clients_num; + } + + /* store client type descriptors for configurable client indexing logic */ + drv_data->hw_fence_client_types = hw_fence_client_types; + + /* clients and size desc are allocated for all static clients regardless of device-tree */ + drv_data->clients_num = HW_FENCE_MAX_STATIC_CLIENTS_INDEX + configurable_clients_num; + + /* allocate memory for client queue size descriptors */ + size = drv_data->clients_num * sizeof(struct hw_fence_client_queue_size_desc); + drv_data->hw_fence_client_queue_size = kzalloc(size, GFP_KERNEL); + if (!drv_data->hw_fence_client_queue_size) + return -ENOMEM; + + /* initialize client queue size desc for each client */ + start_offset = PAGE_ALIGN(drv_data->hw_fence_mem_ctrl_queues_size + + HW_FENCE_MEM_LOCKS_SIZE(drv_data->rxq_clients_num) + + drv_data->hw_fence_mem_fences_table_size); + 
for (i = 0; i < HW_FENCE_MAX_CLIENT_TYPE; i++) { + desc = &hw_fence_client_types[i]; for (j = 0; j < desc->clients_num; j++) { - drv_data->hw_fence_client_queue_size[desc->init_id + j] = + enum hw_fence_client_id client_id_ext = desc->init_id + j; + enum hw_fence_client_id client_id = + hw_fence_utils_get_client_id_priv(drv_data, client_id_ext); + + drv_data->hw_fence_client_queue_size[client_id] = (struct hw_fence_client_queue_size_desc) {desc->queues_num, desc->queue_entries, desc->mem_size, start_offset}; + HWFNC_DBG_INIT("%s client_id_ext:%lu client_id:%lu start_offset:%lu\n", + desc->name, client_id_ext, client_id, start_offset); start_offset += desc->mem_size; } } @@ -670,6 +750,7 @@ static int _parse_client_queue_dt_props(struct hw_fence_driver_data *drv_data) int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data) { int ret; + size_t size; u32 val = 0; ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-table-entries", &val); @@ -721,11 +802,20 @@ int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data) return -EINVAL; } + /* allocate clients */ + + size = drv_data->clients_num * sizeof(struct msm_hw_fence_client *); + drv_data->clients = kzalloc(size, GFP_KERNEL); + if (!drv_data->clients) + return -ENOMEM; + HWFNC_DBG_INIT("table: entries=%lu mem_size=%lu queue: entries=%lu\b", drv_data->hw_fence_table_entries, drv_data->hw_fence_mem_fences_table_size, drv_data->hw_fence_queue_entries); HWFNC_DBG_INIT("ctrl queue: size=%lu mem_size=%lu\b", drv_data->hw_fence_ctrl_queue_size, drv_data->hw_fence_mem_ctrl_queues_size); + HWFNC_DBG_INIT("clients_num: %lu, total_mem_size:%lu\n", drv_data->clients_num, + drv_data->used_mem_size); return 0; } @@ -837,3 +927,31 @@ int hw_fence_utils_map_ctl_start(struct hw_fence_driver_data *drv_data) return 0; } + +enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver_data *drv_data, + enum hw_fence_client_id client_id) +{ + int i, client_type, offset; + 
enum hw_fence_client_id client_id_priv; + + if (client_id < HW_FENCE_MAX_STATIC_CLIENTS_INDEX) + return client_id; + + /* consolidate external 'hw_fence_client_id' enum into consecutive internal client IDs */ + client_type = HW_FENCE_MAX_CLIENT_TYPE_STATIC + + (client_id - HW_FENCE_MAX_STATIC_CLIENTS_INDEX) / + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT; + offset = (client_id - HW_FENCE_MAX_STATIC_CLIENTS_INDEX) % + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT; + + /* invalid client id out of range of supported configurable sub-clients */ + if (offset >= drv_data->hw_fence_client_types[client_type].clients_num) + return HW_FENCE_CLIENT_MAX; + + client_id_priv = HW_FENCE_MAX_STATIC_CLIENTS_INDEX + offset; + + for (i = HW_FENCE_MAX_CLIENT_TYPE_STATIC; i < client_type; i++) + client_id_priv += drv_data->hw_fence_client_types[i].clients_num; + + return client_id_priv; +} diff --git a/hw_fence/src/hw_fence_ioctl.c b/hw_fence/src/hw_fence_ioctl.c index 72566126c6..02942dc3ea 100644 --- a/hw_fence/src/hw_fence_ioctl.c +++ b/hw_fence/src/hw_fence_ioctl.c @@ -132,9 +132,9 @@ static bool _is_valid_client(struct hw_sync_obj *obj) if (!obj) return false; - if (obj->client_id < HW_FENCE_CLIENT_ID_VAL0 || obj->client_id >= HW_FENCE_CLIENT_MAX) { + if (obj->client_id < HW_FENCE_CLIENT_ID_VAL0 || obj->client_id > HW_FENCE_CLIENT_ID_VAL6) { HWFNC_ERR("invalid client_id:%d min:%d max:%d\n", obj->client_id, - HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_MAX); + HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_ID_VAL6); return false; } @@ -151,9 +151,9 @@ static int _get_client_id(struct hw_sync_obj *obj, unsigned long arg) if (!obj) return -EINVAL; - if (client_id < HW_FENCE_CLIENT_ID_VAL0 || client_id >= HW_FENCE_CLIENT_MAX) { + if (client_id < HW_FENCE_CLIENT_ID_VAL0 || client_id > HW_FENCE_CLIENT_ID_VAL6) { HWFNC_ERR("invalid client_id:%d min:%d max:%d\n", client_id, - HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_MAX); + HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_ID_VAL6); return -EINVAL; } diff --git 
a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 5085592def..30eaf25c70 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -17,24 +17,33 @@ struct hw_fence_driver_data *hw_fence_drv_data; static bool hw_fence_driver_enable; -void *msm_hw_fence_register(enum hw_fence_client_id client_id, +void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, struct msm_hw_fence_mem_addr *mem_descriptor) { struct msm_hw_fence_client *hw_fence_client; + enum hw_fence_client_id client_id; int ret; - HWFNC_DBG_H("++ client_id:%d\n", client_id); + HWFNC_DBG_H("++ client_id_ext:%d\n", client_id_ext); if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { HWFNC_ERR("hw fence driver not ready\n"); return ERR_PTR(-EAGAIN); } - if (!mem_descriptor || client_id >= HW_FENCE_CLIENT_MAX) { - HWFNC_ERR("Invalid params: %d client_id:%d\n", - !mem_descriptor, client_id); + if (!mem_descriptor || client_id_ext >= HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("Invalid params: %d client_id_ext:%d\n", + !mem_descriptor, client_id_ext); return ERR_PTR(-EINVAL); } + + client_id = hw_fence_utils_get_client_id_priv(hw_fence_drv_data, client_id_ext); + if (client_id >= HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("Invalid params: client_id:%d client_id_ext:%d\n", + client_id, client_id_ext); + return ERR_PTR(-EINVAL); + } + /* Alloc client handle */ hw_fence_client = kzalloc(sizeof(*hw_fence_client), GFP_KERNEL); if (!hw_fence_client) @@ -54,6 +63,7 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id, mutex_unlock(&hw_fence_drv_data->clients_register_lock); hw_fence_client->client_id = client_id; + hw_fence_client->client_id_ext = client_id_ext; hw_fence_client->ipc_client_vid = hw_fence_ipcc_get_client_virt_id(hw_fence_drv_data, client_id); hw_fence_client->ipc_client_pid = @@ -132,7 +142,7 @@ int msm_hw_fence_deregister(void *client_handle) } hw_fence_client = (struct msm_hw_fence_client *)client_handle; - if 
(hw_fence_client->client_id >= HW_FENCE_CLIENT_MAX) { + if (hw_fence_client->client_id >= hw_fence_drv_data->clients_num) { HWFNC_ERR("Invalid client_id:%d\n", hw_fence_client->client_id); return -EINVAL; } @@ -257,7 +267,7 @@ int msm_hw_fence_destroy_with_handle(void *client_handle, u64 handle) } hw_fence_client = (struct msm_hw_fence_client *)client_handle; - if (hw_fence_client->client_id >= HW_FENCE_CLIENT_MAX) { + if (hw_fence_client->client_id >= hw_fence_drv_data->clients_num) { HWFNC_ERR("Invalid client_id:%d\n", hw_fence_client->client_id); return -EINVAL; } @@ -305,6 +315,12 @@ int msm_hw_fence_wait_update_v2(void *client_handle, return -EINVAL; } + if (hw_fence_client->client_id > hw_fence_drv_data->rxq_clients_num) { + HWFNC_ERR("Transmit-only client client_id:%d client_id_ext:%d register for wait\n", + hw_fence_client->client_id, hw_fence_client->client_id_ext); + return -EINVAL; + } + HWFNC_DBG_H("+\n"); /* Process all the list of fences */ @@ -380,10 +396,19 @@ int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags) } EXPORT_SYMBOL(msm_hw_fence_reset_client); -int msm_hw_fence_reset_client_by_id(enum hw_fence_client_id client_id, u32 reset_flags) +int msm_hw_fence_reset_client_by_id(enum hw_fence_client_id client_id_ext, u32 reset_flags) { + enum hw_fence_client_id client_id; + + if (client_id_ext >= HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("Invalid client_id_ext:%d\n", client_id_ext); + return -EINVAL; + } + + client_id = hw_fence_utils_get_client_id_priv(hw_fence_drv_data, client_id_ext); + if (client_id >= HW_FENCE_CLIENT_MAX) { - HWFNC_ERR("Invalid client_id:%d\n", client_id); + HWFNC_ERR("Invalid client_id:%d client_id_ext:%d\n", client_id, client_id_ext); return -EINVAL; } From 965d398c0635af4ab59fac971fba9a662abfe016 Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 19 Oct 2022 15:17:59 -0700 Subject: [PATCH 043/166] mm-drivers: hw_fence: update ipc for ipe, vpu, ife clients Update ipc configurations to support IPE, VPU, and IFE clients 
in hw fence driver. Add support for IPE and VPU clients on kalama, and add support for all clients on pineapple. Change-Id: Iee577118284a02bd5b368ca206e88ed75eaa95b3 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_ipc.h | 20 ++++ hw_fence/include/hw_fence_drv_utils.h | 42 ++++++++ hw_fence/src/hw_fence_drv_ipc.c | 147 +++++++++++++++++++++++--- hw_fence/src/hw_fence_drv_utils.c | 36 ------- 4 files changed, 196 insertions(+), 49 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_ipc.h b/hw_fence/include/hw_fence_drv_ipc.h index e905ea8ed6..07b7aa754c 100644 --- a/hw_fence/include/hw_fence_drv_ipc.h +++ b/hw_fence/include/hw_fence_drv_ipc.h @@ -9,12 +9,32 @@ /* ipc clients virtual client-id */ #define HW_FENCE_IPC_CLIENT_ID_APPS_VID 8 #define HW_FENCE_IPC_CLIENT_ID_GPU_VID 9 +#define HW_FENCE_IPC_CLIENT_ID_IPE_VID 11 +#define HW_FENCE_IPC_CLIENT_ID_VPU_VID 12 #define HW_FENCE_IPC_CLIENT_ID_DPU_VID 25 +#define HW_FENCE_IPC_CLIENT_ID_IFE0_VID 128 +#define HW_FENCE_IPC_CLIENT_ID_IFE1_VID 129 +#define HW_FENCE_IPC_CLIENT_ID_IFE2_VID 130 +#define HW_FENCE_IPC_CLIENT_ID_IFE3_VID 131 +#define HW_FENCE_IPC_CLIENT_ID_IFE4_VID 132 +#define HW_FENCE_IPC_CLIENT_ID_IFE5_VID 133 +#define HW_FENCE_IPC_CLIENT_ID_IFE6_VID 134 +#define HW_FENCE_IPC_CLIENT_ID_IFE7_VID 135 /* ipc clients physical client-id */ #define HW_FENCE_IPC_CLIENT_ID_APPS_PID 3 #define HW_FENCE_IPC_CLIENT_ID_GPU_PID 4 +#define HW_FENCE_IPC_CLIENT_ID_IPE_PID 5 +#define HW_FENCE_IPC_CLIENT_ID_VPU_PID 8 #define HW_FENCE_IPC_CLIENT_ID_DPU_PID 9 +#define HW_FENCE_IPC_CLIENT_ID_IFE0_PID 11 +#define HW_FENCE_IPC_CLIENT_ID_IFE1_PID 12 +#define HW_FENCE_IPC_CLIENT_ID_IFE2_PID 13 +#define HW_FENCE_IPC_CLIENT_ID_IFE3_PID 14 +#define HW_FENCE_IPC_CLIENT_ID_IFE4_PID 15 +#define HW_FENCE_IPC_CLIENT_ID_IFE5_PID 16 +#define HW_FENCE_IPC_CLIENT_ID_IFE6_PID 17 +#define HW_FENCE_IPC_CLIENT_ID_IFE7_PID 18 #define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_LAHAINA 2 #define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_WAIPIO 1 diff 
--git a/hw_fence/include/hw_fence_drv_utils.h b/hw_fence/include/hw_fence_drv_utils.h index ac8b504a52..6d9cd9627d 100644 --- a/hw_fence/include/hw_fence_drv_utils.h +++ b/hw_fence/include/hw_fence_drv_utils.h @@ -6,6 +6,24 @@ #ifndef __HW_FENCE_DRV_UTILS_H #define __HW_FENCE_DRV_UTILS_H +/** + * HW_FENCE_MAX_CLIENT_TYPE_STATIC: + * Total number of client types without configurable number of sub-clients (GFX, DPU, VAL, IPE, VPU) + */ +#define HW_FENCE_MAX_CLIENT_TYPE_STATIC 5 + +/** + * HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE: + * Maximum number of client types with configurable number of sub-clients (e.g. IFE) + */ +#define HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE 8 + +/** + * HW_FENCE_MAX_STATIC_CLIENTS_INDEX: + * Maximum number of static clients, i.e. clients without configurable numbers of sub-clients + */ +#define HW_FENCE_MAX_STATIC_CLIENTS_INDEX HW_FENCE_CLIENT_ID_IFE0 + /** * enum hw_fence_mem_reserve - Types of reservations for the carved-out memory. * HW_FENCE_MEM_RESERVE_CTRL_QUEUE: Reserve memory for the ctrl rx/tx queues. @@ -20,6 +38,30 @@ enum hw_fence_mem_reserve { HW_FENCE_MEM_RESERVE_CLIENT_QUEUE }; +/** + * struct hw_fence_client_type_desc - Structure holding client type properties, including static + * properties and client queue properties read from device-tree. + * + * @name: name of client type, used to parse properties from device-tree + * @init_id: initial client_id for given client type within the 'hw_fence_client_id' enum, e.g. 
+ * HW_FENCE_CLIENT_ID_CTL0 for DPU clients + * @max_clients_num: maximum number of clients of given client type + * @clients_num: number of clients of given client type + * @queues_num: number of queues per client of given client type; either one (for only Tx Queue) or + * two (for both Tx and Rx Queues) + * @queue_entries: number of entries per client queue of given client type + * @mem_size: size of memory allocated for client queue(s) per client + */ +struct hw_fence_client_type_desc { + char *name; + enum hw_fence_client_id init_id; + u32 max_clients_num; + u32 clients_num; + u32 queues_num; + u32 queue_entries; + u32 mem_size; +}; + /** * global_atomic_store() - Inter-processor lock * @drv_data: hw fence driver data diff --git a/hw_fence/src/hw_fence_drv_ipc.c b/hw_fence/src/hw_fence_drv_ipc.c index a3cccfbf31..7e46e08f53 100644 --- a/hw_fence/src/hw_fence_drv_ipc.c +++ b/hw_fence/src/hw_fence_drv_ipc.c @@ -9,6 +9,13 @@ #include "hw_fence_drv_ipc.h" #include "hw_fence_drv_debug.h" +/* + * Max size of base table with ipc mappings, with one mapping per client type with configurable + * number of subclients + */ +#define HW_FENCE_IPC_MAP_MAX (HW_FENCE_MAX_STATIC_CLIENTS_INDEX + \ + HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE) + /** * struct hw_fence_client_ipc_map - map client id with ipc signal for trigger. * @ipc_client_id_virt: virtual ipc client id for the hw-fence client. @@ -36,7 +43,7 @@ struct hw_fence_client_ipc_map { * To change to a loopback signal instead of GMU, change ctx0 row to use: * {HW_FENCE_IPC_CLIENT_ID_APPS, 20}. 
*/ -struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_no_dpu[HW_FENCE_CLIENT_MAX] = { +struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_no_dpu[HW_FENCE_IPC_MAP_MAX] = { {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 1, true, true},/* ctrlq*/ {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0, false, false},/* ctx0 */ {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 14, false, true},/*ctl0*/ @@ -64,7 +71,7 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_no_dpu[HW_FENCE_CLIENT_M * * Note that the index of this struct must match the enum hw_fence_client_id */ -struct hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_CLIENT_MAX] = { +struct hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_IPC_MAP_MAX] = { {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 1, true, true},/*ctrl q*/ {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0, false, false},/*ctx0 */ {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 0, false, true},/* ctl0 */ @@ -81,7 +88,17 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_CLIENT_MAX] = { {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25, true, true},/* val4*/ {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26, true, true},/* val5*/ {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27, true, true},/* val6*/ +#else + {0, 0, 0, false, false}, /* val0 */ + {0, 0, 0, false, false}, /* val1 */ + {0, 0, 0, false, false}, /* val2 */ + {0, 0, 0, false, false}, /* val3 */ + {0, 0, 0, false, false}, /* val4 */ + {0, 0, 0, false, false}, /* val5 */ + {0, 0, 0, false, false}, /* val6 */ #endif /* CONFIG_DEBUG_FS */ + {HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_VID, 0, true, true}, /* ipe */ + {HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_VID, 0, true, true}, /* vpu */ }; /** @@ -90,9 +107,12 @@ struct 
hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_CLIENT_MAX] = { * signaled. * This version is for targets that support dpu client id and IPC v2. * - * Note that the index of this struct must match the enum hw_fence_client_id + * Note that the index of this struct must match the enum hw_fence_client_id for clients ids less + * than HW_FENCE_MAX_STATIC_CLIENTS_INDEX. + * For clients with configurable sub-clients, the index of this struct matches + * HW_FENCE_MAX_STATIC_CLIENTS_INDEX + (client type index - HW_FENCE_MAX_CLIENT_TYPE_STATIC). */ -struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_v2[HW_FENCE_CLIENT_MAX] = { +struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_v2[HW_FENCE_IPC_MAP_MAX] = { {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 1, true, true},/*ctrlq */ {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_PID, 0, false, false},/* ctx0*/ {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 0, false, true},/* ctl0 */ @@ -109,12 +129,30 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_v2[HW_FENCE_CLIENT_MAX] {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 25, true, true},/* val4*/ {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 26, true, true},/* val5*/ {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 27, true, true},/* val6*/ +#else + {0, 0, 0, false, false}, /* val0 */ + {0, 0, 0, false, false}, /* val1 */ + {0, 0, 0, false, false}, /* val2 */ + {0, 0, 0, false, false}, /* val3 */ + {0, 0, 0, false, false}, /* val4 */ + {0, 0, 0, false, false}, /* val5 */ + {0, 0, 0, false, false}, /* val6 */ #endif /* CONFIG_DEBUG_FS */ + {HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_PID, 0, true, true}, /* ipe */ + {HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_PID, 0, true, true}, /* vpu */ + {HW_FENCE_IPC_CLIENT_ID_IFE0_VID, HW_FENCE_IPC_CLIENT_ID_IFE0_PID, 0, false, true},/* ife0*/ + 
{HW_FENCE_IPC_CLIENT_ID_IFE1_VID, HW_FENCE_IPC_CLIENT_ID_IFE1_PID, 0, false, true},/* ife1*/ + {HW_FENCE_IPC_CLIENT_ID_IFE2_VID, HW_FENCE_IPC_CLIENT_ID_IFE2_PID, 0, false, true},/* ife2*/ + {HW_FENCE_IPC_CLIENT_ID_IFE3_VID, HW_FENCE_IPC_CLIENT_ID_IFE3_PID, 0, false, true},/* ife3*/ + {HW_FENCE_IPC_CLIENT_ID_IFE4_VID, HW_FENCE_IPC_CLIENT_ID_IFE4_PID, 0, false, true},/* ife4*/ + {HW_FENCE_IPC_CLIENT_ID_IFE5_VID, HW_FENCE_IPC_CLIENT_ID_IFE5_PID, 0, false, true},/* ife5*/ + {HW_FENCE_IPC_CLIENT_ID_IFE6_VID, HW_FENCE_IPC_CLIENT_ID_IFE6_PID, 0, false, true},/* ife6*/ + {HW_FENCE_IPC_CLIENT_ID_IFE7_VID, HW_FENCE_IPC_CLIENT_ID_IFE7_PID, 0, false, true},/* ife7*/ }; int hw_fence_ipcc_get_client_virt_id(struct hw_fence_driver_data *drv_data, u32 client_id) { - if (!drv_data || client_id >= HW_FENCE_CLIENT_MAX) + if (!drv_data || client_id >= drv_data->clients_num) return -EINVAL; return drv_data->ipc_clients_table[client_id].ipc_client_id_virt; @@ -122,7 +160,7 @@ int hw_fence_ipcc_get_client_virt_id(struct hw_fence_driver_data *drv_data, u32 int hw_fence_ipcc_get_client_phys_id(struct hw_fence_driver_data *drv_data, u32 client_id) { - if (!drv_data || client_id >= HW_FENCE_CLIENT_MAX) + if (!drv_data || client_id >= drv_data->clients_num) return -EINVAL; return drv_data->ipc_clients_table[client_id].ipc_client_id_phys; @@ -130,7 +168,7 @@ int hw_fence_ipcc_get_client_phys_id(struct hw_fence_driver_data *drv_data, u32 int hw_fence_ipcc_get_signal_id(struct hw_fence_driver_data *drv_data, u32 client_id) { - if (!drv_data || client_id >= HW_FENCE_CLIENT_MAX) + if (!drv_data || client_id >= drv_data->clients_num) return -EINVAL; return drv_data->ipc_clients_table[client_id].ipc_signal_id; @@ -138,8 +176,8 @@ int hw_fence_ipcc_get_signal_id(struct hw_fence_driver_data *drv_data, u32 clien bool hw_fence_ipcc_needs_rxq_update(struct hw_fence_driver_data *drv_data, int client_id) { - if (!drv_data || client_id >= HW_FENCE_CLIENT_MAX) - return -EINVAL; + if (!drv_data || client_id 
>= drv_data->clients_num) + return false; return drv_data->ipc_clients_table[client_id].update_rxq; } @@ -147,7 +185,7 @@ bool hw_fence_ipcc_needs_rxq_update(struct hw_fence_driver_data *drv_data, int c bool hw_fence_ipcc_needs_ipc_irq(struct hw_fence_driver_data *drv_data, int client_id) { if (!drv_data || client_id >= HW_FENCE_CLIENT_MAX) - return -EINVAL; + return false; return drv_data->ipc_clients_table[client_id].send_ipc; } @@ -164,6 +202,26 @@ static inline char *_get_ipc_phys_client_name(u32 client_id) return "GPU_PID"; case HW_FENCE_IPC_CLIENT_ID_DPU_PID: return "DPU_PID"; + case HW_FENCE_IPC_CLIENT_ID_IPE_PID: + return "IPE_PID"; + case HW_FENCE_IPC_CLIENT_ID_VPU_PID: + return "VPU_PID"; + case HW_FENCE_IPC_CLIENT_ID_IFE0_PID: + return "IFE0_PID"; + case HW_FENCE_IPC_CLIENT_ID_IFE1_PID: + return "IFE1_PID"; + case HW_FENCE_IPC_CLIENT_ID_IFE2_PID: + return "IFE2_PID"; + case HW_FENCE_IPC_CLIENT_ID_IFE3_PID: + return "IFE3_PID"; + case HW_FENCE_IPC_CLIENT_ID_IFE4_PID: + return "IFE4_PID"; + case HW_FENCE_IPC_CLIENT_ID_IFE5_PID: + return "IFE5_PID"; + case HW_FENCE_IPC_CLIENT_ID_IFE6_PID: + return "IFE6_PID"; + case HW_FENCE_IPC_CLIENT_ID_IFE7_PID: + return "IFE7_PID"; } return "UNKNOWN_PID"; @@ -181,6 +239,26 @@ static inline char *_get_ipc_virt_client_name(u32 client_id) return "GPU_VID"; case HW_FENCE_IPC_CLIENT_ID_DPU_VID: return "DPU_VID"; + case HW_FENCE_IPC_CLIENT_ID_IPE_VID: + return "IPE_VID"; + case HW_FENCE_IPC_CLIENT_ID_VPU_VID: + return "VPU_VID"; + case HW_FENCE_IPC_CLIENT_ID_IFE0_VID: + return "IFE0_VID"; + case HW_FENCE_IPC_CLIENT_ID_IFE1_VID: + return "IFE1_VID"; + case HW_FENCE_IPC_CLIENT_ID_IFE2_VID: + return "IFE2_VID"; + case HW_FENCE_IPC_CLIENT_ID_IFE3_VID: + return "IFE3_VID"; + case HW_FENCE_IPC_CLIENT_ID_IFE4_VID: + return "IFE4_VID"; + case HW_FENCE_IPC_CLIENT_ID_IFE5_VID: + return "IFE5_VID"; + case HW_FENCE_IPC_CLIENT_ID_IFE6_VID: + return "IFE6_VID"; + case HW_FENCE_IPC_CLIENT_ID_IFE7_VID: + return "IFE7_VID"; } return 
"UNKNOWN_VID"; @@ -208,6 +286,46 @@ void hw_fence_ipcc_trigger_signal(struct hw_fence_driver_data *drv_data, wmb(); } +static int _hw_fence_ipcc_init_map_with_configurable_clients(struct hw_fence_driver_data *drv_data, + struct hw_fence_client_ipc_map *base_table) +{ + int i, j, map_idx; + size_t size; + + size = drv_data->clients_num * sizeof(struct hw_fence_client_ipc_map); + drv_data->ipc_clients_table = kzalloc(size, GFP_KERNEL); + + if (!drv_data->ipc_clients_table) + return -ENOMEM; + + /* copy mappings for static hw fence clients */ + size = HW_FENCE_MAX_STATIC_CLIENTS_INDEX * sizeof(struct hw_fence_client_ipc_map); + memcpy(drv_data->ipc_clients_table, base_table, size); + + /* initialize mappings for ipc clients with configurable number of hw fence clients */ + map_idx = HW_FENCE_MAX_STATIC_CLIENTS_INDEX; + for (i = 0; i < HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE; i++) { + int client_type = HW_FENCE_MAX_CLIENT_TYPE_STATIC + i; + int clients_num = drv_data->hw_fence_client_types[client_type].clients_num; + + for (j = 0; j < clients_num; j++) { + /* this should never happen if drv_data->clients_num is correct */ + if (map_idx >= drv_data->clients_num) { + HWFNC_ERR("%s clients_num:%lu exceeds drv_data->clients_num:%lu\n", + drv_data->hw_fence_client_types[client_type].name, + clients_num, drv_data->clients_num); + return -EINVAL; + } + drv_data->ipc_clients_table[map_idx] = + base_table[HW_FENCE_MAX_STATIC_CLIENTS_INDEX + i]; + drv_data->ipc_clients_table[map_idx].ipc_signal_id = j; + map_idx++; + } + } + + return 0; +} + /** * _hw_fence_ipcc_hwrev_init() - Initializes internal driver struct with corresponding ipcc data, * according to the ipcc hw revision. 
@@ -216,6 +334,8 @@ void hw_fence_ipcc_trigger_signal(struct hw_fence_driver_data *drv_data, */ static int _hw_fence_ipcc_hwrev_init(struct hw_fence_driver_data *drv_data, u32 hwrev) { + int ret = 0; + switch (hwrev) { case HW_FENCE_IPCC_HW_REV_100: drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; @@ -242,14 +362,15 @@ static int _hw_fence_ipcc_hwrev_init(struct hw_fence_driver_data *drv_data, u32 drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_PID; drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_PINEAPPLE; /* Fence */ - drv_data->ipc_clients_table = hw_fence_clients_ipc_map_v2; + ret = _hw_fence_ipcc_init_map_with_configurable_clients(drv_data, + hw_fence_clients_ipc_map_v2); HWFNC_DBG_INIT("ipcc protocol_id: Pineapple\n"); break; default: return -1; } - return 0; + return ret; } int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data) @@ -320,7 +441,7 @@ int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data) HWFNC_DBG_H("Initialize dpu signals\n"); /* Enable Client-Signal pairs from DPU (25) to APPS(NS) (8) */ - for (i = 0; i < HW_FENCE_CLIENT_MAX; i++) { + for (i = 0; i < drv_data->clients_num; i++) { hw_fence_client = &drv_data->ipc_clients_table[i]; /* skip any client that is not a dpu client */ diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index e1857bb962..d530950785 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -21,18 +21,6 @@ */ #define MAX_CLIENT_QUEUE_MEM_SIZE 0x100000 -/** - * HW_FENCE_MAX_CLIENT_TYPE_STATIC: - * Total number of client types without configurable number of sub-clients (GFX, DPU, VAL, IPE, VPU) - */ -#define HW_FENCE_MAX_CLIENT_TYPE_STATIC 5 - -/** - * HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE: - * Maximum number of client types with configurable number of sub-clients (e.g. 
IFE) - */ -#define HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE 8 - /** * HW_FENCE_MAX_CLIENT_TYPE: * Total number of client types with and without configurable number of sub-clients @@ -66,30 +54,6 @@ #define HW_FENCE_CLIENT_TYPE_MAX_VPU 1 #define HW_FENCE_CLIENT_TYPE_MAX_IFE 32 -/** - * struct hw_fence_client_type_desc - Structure holding client type properties, including static - * properties and client queue properties read from device-tree. - * - * @name: name of client type, used to parse properties from device-tree - * @init_id: initial client_id for given client type within the 'hw_fence_client_id' enum, e.g. - * HW_FENCE_CLIENT_ID_CTL0 for DPU clients - * @max_clients_num: maximum number of clients of given client type - * @clients_num: number of clients of given client type - * @queues_num: number of queues per client of given client type; either one (for only Tx Queue) or - * two (for both Tx and Rx Queues) - * @queue_entries: number of entries per client queue of given client type - * @mem_size: size of memory allocated for client queue(s) per client - */ -struct hw_fence_client_type_desc { - char *name; - enum hw_fence_client_id init_id; - u32 max_clients_num; - u32 clients_num; - u32 queues_num; - u32 queue_entries; - u32 mem_size; -}; - /** * struct hw_fence_client_types - Table describing all supported client types, used to parse * device-tree properties related to client queue size. From f4afac60ba7a802d5221d05368d63ef922d03422 Mon Sep 17 00:00:00 2001 From: Grace An Date: Mon, 17 Oct 2022 13:11:26 -0700 Subject: [PATCH 044/166] mm-drivers: hw_fence: update txq to use separate software wr ptr Some hw fence driver clients require the ability to call the 'msm_hw_fence_update_txq' API to update the queue payload without updating the 'write_index' member within the hfi header. These clients also need to receive the index at which the payload is written within the queue. 
This change adds support for this requirement by adding a device-tree property to configure this behavior for each client. The 'tx_wm' member within the hfi header is used to track in software the place where the payloads are within the queue for clients that skip the update to the 'write_index' member. Change-Id: I2881fa49bef4e49691eb6049830f9dc8dc8fa425 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 6 +++ hw_fence/include/hw_fence_drv_utils.h | 16 +++++++ hw_fence/src/hw_fence_drv_priv.c | 18 +++++--- hw_fence/src/hw_fence_drv_utils.c | 60 +++++++++++++++++---------- hw_fence/src/msm_hw_fence.c | 2 + 5 files changed, 75 insertions(+), 27 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 2645fbc638..a59b48f2e5 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -175,6 +175,8 @@ enum payload_type { * @ipc_client_pid: physical id of the ipc client for this hw fence driver client * @update_rxq: bool to indicate if client uses rx-queue * @send_ipc: bool to indicate if client requires ipc interrupt for already signaled fences + * @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence + * driver and hfi_header->tx_wm is updated instead * @wait_queue: wait queue for the validation clients * @val_signal: doorbell flag to signal the validation clients in the wait queue */ @@ -188,6 +190,7 @@ struct msm_hw_fence_client { int ipc_client_pid; bool update_rxq; bool send_ipc; + bool skip_txq_wr_idx; #if IS_ENABLED(CONFIG_DEBUG_FS) wait_queue_head_t wait_queue; atomic_t val_signal; @@ -241,12 +244,15 @@ struct msm_hw_fence_dbg_data { * @mem_size: size of memory allocated for client queues * @start_offset: start offset of client queue memory region, from beginning of carved-out memory * allocation for hw fence driver + * @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence + * 
driver and hfi_header->tx_wm is updated instead */ struct hw_fence_client_queue_size_desc { u32 queues_num; u32 queue_entries; u32 mem_size; u32 start_offset; + bool skip_txq_wr_idx; }; /** diff --git a/hw_fence/include/hw_fence_drv_utils.h b/hw_fence/include/hw_fence_drv_utils.h index 6d9cd9627d..756f07b2bf 100644 --- a/hw_fence/include/hw_fence_drv_utils.h +++ b/hw_fence/include/hw_fence_drv_utils.h @@ -51,6 +51,8 @@ enum hw_fence_mem_reserve { * two (for both Tx and Rx Queues) * @queue_entries: number of entries per client queue of given client type * @mem_size: size of memory allocated for client queue(s) per client + * @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence + * driver and hfi_header->tx_wm is updated instead */ struct hw_fence_client_type_desc { char *name; @@ -60,6 +62,7 @@ struct hw_fence_client_type_desc { u32 queues_num; u32 queue_entries; u32 mem_size; + bool skip_txq_wr_idx; }; /** @@ -170,4 +173,17 @@ int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver_data *drv_data, enum hw_fence_client_id client_id); +/** + * hw_fence_utils_skips_txq_wr_index() - Returns bool to indicate if client Tx Queue write_index + * is not updated in hw fence driver. Instead, + * hfi_header->tx_wm tracks where payload is written within + * the queue. 
+ * + * @drv_data: driver data + * @client_id: hw fence driver client id + * + * Returns: true if hw fence driver skips update to client tx queue write_index, false otherwise + */ +bool hw_fence_utils_skips_txq_wr_idx(struct hw_fence_driver_data *drv_data, int client_id); + #endif /* __HW_FENCE_DRV_UTILS_H */ diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index ce7546de69..21fe8822a0 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -248,6 +248,7 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, bool lock_client = false; u32 lock_idx; u64 timestamp; + u32 *wr_ptr; int ret = 0; if (queue_type >= @@ -269,6 +270,12 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, return -EINVAL; } + /* if skipping update txq wr_index, then use hfi_header->tx_wm instead */ + if (queue_type == (HW_FENCE_TX_QUEUE - 1) && hw_fence_client->skip_txq_wr_idx) + wr_ptr = &hfi_header->tx_wm; + else + wr_ptr = &hfi_header->write_index; + /* * We need to lock the client if there is an Rx Queue update, since that * is the only time when HW Fence driver can have a race condition updating @@ -294,11 +301,12 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, /* Get read and write index */ read_idx = readl_relaxed(&hfi_header->read_index); - write_idx = readl_relaxed(&hfi_header->write_index); + write_idx = readl_relaxed(wr_ptr); - HWFNC_DBG_Q("wr client:%d rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d q:0x%pK type:%d\n", - hw_fence_client->client_id, &hfi_header->read_index, &hfi_header->write_index, - read_idx, write_idx, queue, queue_type); + HWFNC_DBG_Q("wr client:%d r_ptr:0x%pK w_ptr:0x%pK r_idx:%d w_idx:%d q:0x%pK type:%d s:%s\n", + hw_fence_client->client_id, &hfi_header->read_index, wr_ptr, + read_idx, write_idx, queue, queue_type, + hw_fence_client->skip_txq_wr_idx ? "true" : "false"); /* Check queue to make sure message will fit */ q_free_u32 = read_idx <= write_idx ? 
(q_size_u32 - (write_idx - read_idx)) : @@ -351,7 +359,7 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, wmb(); /* update the write index */ - writel_relaxed(to_write_idx, &hfi_header->write_index); + writel_relaxed(to_write_idx, wr_ptr); /* update memory for the index */ wmb(); diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index d530950785..2ae198fbfd 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -58,9 +58,9 @@ * struct hw_fence_client_types - Table describing all supported client types, used to parse * device-tree properties related to client queue size. * - * The fields name, init_id, and max_clients_num are constants. Default values for clients_num and - * queues_num are provided in this table, and clients_num, queues_num, and queue_entries can be read - * from device-tree. + * The fields name, init_id, and max_clients_num are constants. Default values for clients_num, + * queues_num, and skip_txq_wr_idx are provided in this table, and clients_num, queues_num, + * queue_entries, and skip_txq_wr_idx can be read from device-tree. * * If a value for queue entries is not parsed for the client type, then the default number of client * queue entries (parsed from device-tree) is used. @@ -68,29 +68,30 @@ * Notes: * 1. Client types must be in the same order as client_ids within the enum 'hw_fence_client_id'. * 2. Each HW Fence client ID must be described by one of the client types in this table. - * 3. A new client type must set: name, init_id, max_clients_num, clients_num, and queues_num. + * 3. A new client type must set: name, init_id, max_clients_num, clients_num, queues_num, and + * skip_txq_wr_idx. * 4. Either constant HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE or HW_FENCE_MAX_CLIENT_TYPE_STATIC must * be incremented as appropriate for new client types. 
*/ struct hw_fence_client_type_desc hw_fence_client_types[HW_FENCE_MAX_CLIENT_TYPE] = { {"gpu", HW_FENCE_CLIENT_ID_CTX0, HW_FENCE_CLIENT_TYPE_MAX_GPU, HW_FENCE_CLIENT_TYPE_MAX_GPU, - HW_FENCE_CLIENT_QUEUES, 0, 0}, + HW_FENCE_CLIENT_QUEUES, 0, 0, false}, {"dpu", HW_FENCE_CLIENT_ID_CTL0, HW_FENCE_CLIENT_TYPE_MAX_DPU, HW_FENCE_CLIENT_TYPE_MAX_DPU, - HW_FENCE_CLIENT_QUEUES, 0, 0}, + HW_FENCE_CLIENT_QUEUES, 0, 0, false}, {"val", HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_TYPE_MAX_VAL, HW_FENCE_CLIENT_TYPE_MAX_VAL, - HW_FENCE_CLIENT_QUEUES, 0, 0}, + HW_FENCE_CLIENT_QUEUES, 0, 0, false}, {"ipe", HW_FENCE_CLIENT_ID_IPE, HW_FENCE_CLIENT_TYPE_MAX_IPE, 0, - HW_FENCE_CLIENT_QUEUES, 0, 0}, + HW_FENCE_CLIENT_QUEUES, 0, 0, false}, {"vpu", HW_FENCE_CLIENT_ID_VPU, HW_FENCE_CLIENT_TYPE_MAX_VPU, 0, - HW_FENCE_CLIENT_QUEUES, 0, 0}, - {"ife0", HW_FENCE_CLIENT_ID_IFE0, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, - {"ife1", HW_FENCE_CLIENT_ID_IFE1, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, - {"ife2", HW_FENCE_CLIENT_ID_IFE2, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, - {"ife3", HW_FENCE_CLIENT_ID_IFE3, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, - {"ife4", HW_FENCE_CLIENT_ID_IFE4, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, - {"ife5", HW_FENCE_CLIENT_ID_IFE5, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, - {"ife6", HW_FENCE_CLIENT_ID_IFE6, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, - {"ife7", HW_FENCE_CLIENT_ID_IFE7, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, + HW_FENCE_CLIENT_QUEUES, 0, 0, false}, + {"ife0", HW_FENCE_CLIENT_ID_IFE0, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, + {"ife1", HW_FENCE_CLIENT_ID_IFE1, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, + {"ife2", HW_FENCE_CLIENT_ID_IFE2, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, + {"ife3", HW_FENCE_CLIENT_ID_IFE3, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, + {"ife4", HW_FENCE_CLIENT_ID_IFE4, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, + {"ife5", HW_FENCE_CLIENT_ID_IFE5, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 
true}, + {"ife6", HW_FENCE_CLIENT_ID_IFE6, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, + {"ife7", HW_FENCE_CLIENT_ID_IFE7, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, }; static void _lock(uint64_t *wait) @@ -594,13 +595,13 @@ static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_da struct hw_fence_client_type_desc *desc) { char name[31]; - u32 tmp[3]; + u32 tmp[4]; u32 queue_size; int ret; /* parse client queue property from device-tree */ snprintf(name, sizeof(name), "qcom,hw-fence-client-type-%s", desc->name); - ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, 3); + ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, 4); if (ret) { HWFNC_DBG_INIT("missing %s client queue entry or invalid ret:%d\n", desc->name, ret); @@ -609,6 +610,12 @@ static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_da desc->clients_num = tmp[0]; desc->queues_num = tmp[1]; desc->queue_entries = tmp[2]; + + if (tmp[3] > 1) { + HWFNC_ERR("%s invalid skip_txq_wr_idx prop:%lu\n", desc->name, tmp[3]); + return -EINVAL; + } + desc->skip_txq_wr_idx = tmp[3]; } if (desc->clients_num > desc->max_clients_num || !desc->queues_num || @@ -642,8 +649,9 @@ static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_da return -EINVAL; } - HWFNC_DBG_INIT("%s: clients=%lu q_num=%lu q_entries=%lu mem_sz=%lu\n", desc->name, - desc->clients_num, desc->queues_num, desc->queue_entries, desc->mem_size); + HWFNC_DBG_INIT("%s: clients=%lu q_num=%lu q_entries=%lu mem_sz=%lu skips_wr_ptr:%s\n", + desc->name, desc->clients_num, desc->queues_num, desc->queue_entries, + desc->mem_size, desc->skip_txq_wr_idx ? 
"true" : "false"); return 0; } @@ -700,7 +708,7 @@ static int _parse_client_queue_dt_props(struct hw_fence_driver_data *drv_data) drv_data->hw_fence_client_queue_size[client_id] = (struct hw_fence_client_queue_size_desc) {desc->queues_num, desc->queue_entries, desc->mem_size, - start_offset}; + start_offset, desc->skip_txq_wr_idx}; HWFNC_DBG_INIT("%s client_id_ext:%lu client_id:%lu start_offset:%lu\n", desc->name, client_id_ext, client_id, start_offset); start_offset += desc->mem_size; @@ -919,3 +927,11 @@ enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver return client_id_priv; } + +bool hw_fence_utils_skips_txq_wr_idx(struct hw_fence_driver_data *drv_data, int client_id) +{ + if (!drv_data || client_id >= drv_data->clients_num) + return false; + + return drv_data->hw_fence_client_queue_size[client_id].skip_txq_wr_idx; +} diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 30eaf25c70..e81a4dd457 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -93,6 +93,8 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, } hw_fence_client->send_ipc = hw_fence_ipcc_needs_ipc_irq(hw_fence_drv_data, client_id); + hw_fence_client->skip_txq_wr_idx = hw_fence_utils_skips_txq_wr_idx(hw_fence_drv_data, + client_id); /* Alloc Client HFI Headers and Queues */ ret = hw_fence_alloc_client_resources(hw_fence_drv_data, From e0bf897e1fcf09b79da3af6ed41a461fefa9f2f4 Mon Sep 17 00:00:00 2001 From: Grace An Date: Mon, 24 Oct 2022 12:05:09 -0700 Subject: [PATCH 045/166] mm-drivers: hw_fence: update ipc protocol to fence for pineapple Starting pineapple, fence protocol is used for hw fence driver. 
Change-Id: I87435128c22aeb338dfcda38f0196e04dc9eb70b Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_ipc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hw_fence/src/hw_fence_drv_ipc.c b/hw_fence/src/hw_fence_drv_ipc.c index 7e46e08f53..48317cafa8 100644 --- a/hw_fence/src/hw_fence_drv_ipc.c +++ b/hw_fence/src/hw_fence_drv_ipc.c @@ -361,7 +361,7 @@ static int _hw_fence_ipcc_hwrev_init(struct hw_fence_driver_data *drv_data, u32 case HW_FENCE_IPCC_HW_REV_203: drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_PID; - drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_PINEAPPLE; /* Fence */ + drv_data->protocol_id = HW_FENCE_IPC_FENCE_PROTOCOL_ID_PINEAPPLE; /* Fence */ ret = _hw_fence_ipcc_init_map_with_configurable_clients(drv_data, hw_fence_clients_ipc_map_v2); HWFNC_DBG_INIT("ipcc protocol_id: Pineapple\n"); From fbde79b1181e3634b3689bb74873e24237213290 Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 16 Nov 2022 16:41:49 -0800 Subject: [PATCH 046/166] mm-drivers: hw-fence: extend ioctl support to create fence_array Current HW Fence validation framework limits the creation of a HW Fence- array to fences from a single parent client. This change adds support to hw_fence_create_array IOCTL so a HW fence-array can be created from fences of different clients. Change-Id: I6ce801f51747fcab503fc23c1ae981b107d4f315 Signed-off-by: Grace An --- hw_fence/src/hw_fence_ioctl.c | 102 ++++++++++++---------------------- 1 file changed, 34 insertions(+), 68 deletions(-) diff --git a/hw_fence/src/hw_fence_ioctl.c b/hw_fence/src/hw_fence_ioctl.c index 02942dc3ea..379dbf971d 100644 --- a/hw_fence/src/hw_fence_ioctl.c +++ b/hw_fence/src/hw_fence_ioctl.c @@ -71,16 +71,16 @@ struct hw_fence_sync_create_data { /** * struct hw_fence_array_sync_create_data - data used in creating multiple fences. - * @seqno: array of sequence numbers used to create fences. 
- * @num_fences: number of fences to be created. - * @fence: return the fd of the new sync_file with the created fence. - * @hash: array of fence hash + * @seqno: sequence number used to create fence array. + * @num_fences: number of fence fds received. + * @fences: array of fence fds. + * @fence_array_fd: fd of fence array. */ struct hw_fence_array_sync_create_data { - u64 seqno[HW_FENCE_ARRAY_SIZE]; + u64 seqno; int num_fences; - __s32 fence; - u64 hash[HW_FENCE_ARRAY_SIZE]; + u64 fences[HW_FENCE_ARRAY_SIZE]; + __s32 fence_array_fd; }; /** @@ -343,16 +343,22 @@ static long hw_sync_ioctl_destroy_fence(struct hw_sync_obj *obj, unsigned long a return 0; } +static void _put_child_fences(int i, struct dma_fence **fences) +{ + int fence_idx; + + for (fence_idx = i; fence_idx >= 0 ; fence_idx--) + dma_fence_put(fences[i]); +} + static long hw_sync_ioctl_create_fence_array(struct hw_sync_obj *obj, unsigned long arg) { struct dma_fence_array *fence_array; struct hw_fence_array_sync_create_data data; struct dma_fence **fences = NULL; - struct msm_hw_fence_create_params params; struct sync_file *sync_file; - spinlock_t **fence_lock = NULL; int num_fences, i, fd, ret; - u64 hash; + struct hw_dma_fence *fence; if (!_is_valid_client(obj)) { return -EINVAL; @@ -370,80 +376,43 @@ static long hw_sync_ioctl_create_fence_array(struct hw_sync_obj *obj, unsigned l num_fences, HW_FENCE_ARRAY_SIZE); return -EINVAL; } - fence_lock = kcalloc(num_fences, sizeof(*fence_lock), GFP_KERNEL); - if (!fence_lock) - return -ENOMEM; fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL); if (!fences) { - kfree(fence_lock); return -ENOMEM; } - /* - * Create the array of dma fences - * This API takes seqno[num_fences] as the seqno for the fence-array - * and from 0 to (num_fences - 1) for the fences in the array. 
- */ for (i = 0; i < num_fences; i++) { - struct hw_dma_fence *dma_fence; - - fence_lock[i] = kzalloc(sizeof(spinlock_t), GFP_KERNEL); - if (!fence_lock[i]) { - _cleanup_fences(i, fences, fence_lock); - return -ENOMEM; + fd = data.fences[i]; + if (fd <= 0) { + kfree(fences); + return -EINVAL; } - - dma_fence = kzalloc(sizeof(*dma_fence), GFP_KERNEL); - if (!dma_fence) { - _cleanup_fences(i, fences, fence_lock); - return -ENOMEM; + fence = (struct hw_dma_fence *)_hw_sync_get_fence(fd); + if (!fence) { + _put_child_fences(i-1, fences); + kfree(fences); + return -EINVAL; } - fences[i] = &dma_fence->base; - - spin_lock_init(fence_lock[i]); - dma_fence_init(fences[i], &hw_fence_dbg_ops, fence_lock[i], - obj->context, data.seqno[i]); + fences[i] = &fence->base; } /* create the fence array from array of dma fences */ - fence_array = dma_fence_array_create(num_fences, fences, obj->context, data.seqno[i], 0); + fence_array = dma_fence_array_create(num_fences, fences, obj->context, data.seqno, 0); if (!fence_array) { HWFNC_ERR("Error creating fence_array\n"); - _cleanup_fences(num_fences - 1, fences, fence_lock); + /* decrease the refcount incremented for each child fences */ + for (i = 0; i < num_fences; i++) + dma_fence_put(fences[i]); + kfree(fences); return -EINVAL; } - /* create hw fences */ - for (i = 0; i < num_fences; i++) { - params.fence = fences[i]; - params.handle = &hash; - - ret = msm_hw_fence_create(obj->client_handle, ¶ms); - if (ret) { - HWFNC_ERR("Error creating HW fence\n"); - dma_fence_put(&fence_array->base); - /* - * free array of pointers, no need to call kfree in 'fences', - * since that is released from the fence-array release api - */ - kfree(fence_lock); - kfree(fence_array); - return -EINVAL; - } - - /* keep handle in dma_fence, to destroy hw-fence during release */ - to_hw_dma_fence(fences[i])->client_handle = obj->client_handle; - data.hash[i] = hash; - } - /* create fd */ fd = get_unused_fd_flags(0); - if (fd < 0) { + if (fd <= 0) { 
HWFNC_ERR("failed to get fd for client:%d\n", obj->client_id); dma_fence_put(&fence_array->base); - kfree(fence_lock); - kfree(fence_array); return fd; } @@ -451,7 +420,6 @@ static long hw_sync_ioctl_create_fence_array(struct hw_sync_obj *obj, unsigned l if (sync_file == NULL) { HWFNC_ERR("couldn't create fence fd, %d\n", fd); dma_fence_put(&fence_array->base); - kfree(fence_lock); kfree(fence_array); ret = -EINVAL; goto exit; @@ -460,12 +428,10 @@ static long hw_sync_ioctl_create_fence_array(struct hw_sync_obj *obj, unsigned l /* Decrement the refcount that sync_file_create increments */ dma_fence_put(&fence_array->base); - data.fence = fd; + data.fence_array_fd = fd; if (copy_to_user((void __user *)arg, &data, sizeof(data))) { fput(sync_file->file); dma_fence_put(&fence_array->base); - kfree(fence_lock); - kfree(fence_array); ret = -EFAULT; goto exit; } @@ -492,7 +458,7 @@ static long hw_sync_ioctl_destroy_fence_array(struct hw_sync_obj *obj, unsigned if (copy_from_user(&data, (void __user *)arg, sizeof(data))) return -EFAULT; - fd = data.fence; + fd = data.fence_array_fd; fence = (struct dma_fence *)_hw_sync_get_fence(fd); if (!fence) { HWFNC_ERR("Invalid fence fd: %d\n", fd); From bb0f9e965f406d00229703a1b93dc150f3280843 Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 30 Nov 2022 09:06:24 -0800 Subject: [PATCH 047/166] mm-drivers: hw_fence: share hw fence driver mem pool always When hw fencing is disabled via kernel command line argument, allow probing of hw fence driver and perform memory sharing during probe. This ensures that the carved out memory region for hw fences is always shared with hypervisor regardless of hw-fencing feature enablement. 
Change-Id: I7723fd61860e0d6b8dc374a054c8519d98d700a6 Signed-off-by: Grace An --- hw_fence/src/msm_hw_fence.c | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index e81a4dd457..dcbe4cd80c 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -500,15 +500,26 @@ static int msm_hw_fence_probe_init(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, hw_fence_drv_data); hw_fence_drv_data->dev = &pdev->dev; - /* Initialize HW Fence Driver resources */ - rc = hw_fence_init(hw_fence_drv_data); - if (rc) - goto error; + if (hw_fence_driver_enable) { + /* Initialize HW Fence Driver resources */ + rc = hw_fence_init(hw_fence_drv_data); + if (rc) + goto error; - mutex_init(&hw_fence_drv_data->clients_register_lock); + mutex_init(&hw_fence_drv_data->clients_register_lock); - /* set ready ealue so clients can register */ - hw_fence_drv_data->resources_ready = true; + /* set ready value so clients can register */ + hw_fence_drv_data->resources_ready = true; + } else { + /* Allocate hw fence driver mem pool and share it with HYP */ + rc = hw_fence_utils_alloc_mem(hw_fence_drv_data); + if (rc) { + HWFNC_ERR("failed to alloc base memory\n"); + goto error; + } + + HWFNC_DBG_INFO("hw fence driver not enabled\n"); + } HWFNC_DBG_H("-\n"); @@ -534,11 +545,6 @@ static int msm_hw_fence_probe(struct platform_device *pdev) return -EINVAL; } - if (!hw_fence_driver_enable) { - HWFNC_DBG_INFO("hw fence driver not enabled\n"); - return -EOPNOTSUPP; - } - if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-hw-fence")) rc = msm_hw_fence_probe_init(pdev); if (rc) From 13b4e1270c82e1b6c002782a9084a649298055c2 Mon Sep 17 00:00:00 
2001 From: Ingrid Gallardo Date: Mon, 14 Nov 2022 11:50:04 -0800 Subject: [PATCH 048/166] mm-drivers: hw_fence: reset queues during client reset This change make sure that the write_idx and read_idx of the client hfi queues are reset during the call to msm_hw_fence_reset_client. Change-Id: Iaf94865ddf78ed8e19de509e3ee6176d03c5301c Signed-off-by: Ingrid Gallardo --- hw_fence/include/hw_fence_drv_priv.h | 2 ++ hw_fence/src/hw_fence_drv_priv.c | 43 ++++++++++++++++++++++++++++ hw_fence/src/msm_hw_fence.c | 4 ++- 3 files changed, 48 insertions(+), 1 deletion(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index a59b48f2e5..139d9288da 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -473,6 +473,8 @@ int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, int hw_fence_init_controller_resources(struct msm_hw_fence_client *hw_fence_client); void hw_fence_cleanup_client(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client); +void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client); int hw_fence_create(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno, u64 *hash); diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 21fe8822a0..ed7ce14bda 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1430,6 +1430,49 @@ static void _signal_all_wait_clients(struct hw_fence_driver_data *drv_data, } } +void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client) +{ + struct msm_hw_fence_hfi_queue_header *hfi_header; + struct msm_hw_fence_queue *queue; + u32 rd_idx, wr_idx, lock_idx; + + queue = &hw_fence_client->queues[HW_FENCE_TX_QUEUE - 1]; + hfi_header = queue->va_header; + + /* For the client TxQ: set the 
read-index same as last write that was done by the client */ + mb(); /* make sure data is ready before read */ + wr_idx = readl_relaxed(&hfi_header->write_index); + writel_relaxed(wr_idx, &hfi_header->read_index); + wmb(); /* make sure data is updated after write the index*/ + + /* For the client RxQ: set the write-index same as last read done by the client */ + if (hw_fence_client->update_rxq) { + lock_idx = hw_fence_client->client_id - 1; + + if (lock_idx >= drv_data->client_lock_tbl_cnt) { + HWFNC_ERR("cannot reset rxq, lock for client id:%d exceed max:%d\n", + hw_fence_client->client_id, drv_data->client_lock_tbl_cnt); + return; + } + HWFNC_DBG_Q("Locking client id:%d: idx:%d\n", hw_fence_client->client_id, lock_idx); + + /* lock the client rx queue to update */ + GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 1); + + queue = &hw_fence_client->queues[HW_FENCE_RX_QUEUE - 1]; + hfi_header = queue->va_header; + + mb(); /* make sure data is ready before read */ + rd_idx = readl_relaxed(&hfi_header->read_index); + writel_relaxed(rd_idx, &hfi_header->write_index); + wmb(); /* make sure data is updated after write the index */ + + /* unlock */ + GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 0); + } +} + int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash, u32 reset_flags) diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index e81a4dd457..6e1bde5d53 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -389,11 +389,13 @@ int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags) hw_fence_client = (struct msm_hw_fence_client *)client_handle; hw_fences_tbl = hw_fence_drv_data->hw_fences_tbl; - HWFNC_DBG_L("reset fences for client:%d\n", hw_fence_client->client_id); + HWFNC_DBG_L("reset fences and queues for client:%d\n", hw_fence_client->client_id); for (i = 0; i < 
hw_fence_drv_data->hw_fences_tbl_cnt; i++) hw_fence_utils_cleanup_fence(hw_fence_drv_data, hw_fence_client, &hw_fences_tbl[i], i, reset_flags); + hw_fence_utils_reset_queues(hw_fence_drv_data, hw_fence_client); + return 0; } EXPORT_SYMBOL(msm_hw_fence_reset_client); From 4f59f5ce0294f07672dba504c244a102065a8a72 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 10 Jan 2023 15:10:31 -0800 Subject: [PATCH 049/166] mm-drivers: hw_fence: switch to qcom_scm_assign_mem from hyp_assign_phys Switch to upstream friendly qcom_scm_assign_mem from hyp_assign_phys. Change-Id: I01c6b93698fea094cf89926f3168466ba14061bc Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_utils.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 2ae198fbfd..5ce438a921 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include @@ -8,6 +8,7 @@ #include #include #include +#include #include #include "hw_fence_drv_priv.h" @@ -339,18 +340,19 @@ int hw_fence_utils_init_virq(struct hw_fence_driver_data *drv_data) static int hw_fence_gunyah_share_mem(struct hw_fence_driver_data *drv_data, gh_vmid_t self, gh_vmid_t peer) { - u32 src_vmlist[1] = {self}; - int src_perms[2] = {PERM_READ | PERM_WRITE | PERM_EXEC}; - int dst_vmlist[2] = {self, peer}; - int dst_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ | PERM_WRITE}; + struct qcom_scm_vmperm src_vmlist[] = {{self, PERM_READ | PERM_WRITE | PERM_EXEC}}; + struct qcom_scm_vmperm dst_vmlist[] = {{self, PERM_READ | PERM_WRITE}, + {peer, PERM_READ | PERM_WRITE}}; + int srcvmids = BIT(src_vmlist[0].vmid); + int dstvmids = BIT(dst_vmlist[0].vmid) | BIT(dst_vmlist[1].vmid); struct gh_acl_desc *acl; struct gh_sgl_desc *sgl; int ret; - ret = hyp_assign_phys(drv_data->res.start, resource_size(&drv_data->res), - src_vmlist, 1, dst_vmlist, dst_perms, 2); + ret = qcom_scm_assign_mem(drv_data->res.start, resource_size(&drv_data->res), &srcvmids, + dst_vmlist, ARRAY_SIZE(dst_vmlist)); if (ret) { - HWFNC_ERR("%s: hyp_assign_phys failed addr=%x size=%u err=%d\n", + HWFNC_ERR("%s: qcom_scm_assign_mem failed addr=%x size=%u err=%d\n", __func__, drv_data->res.start, drv_data->size, ret); return ret; } @@ -379,9 +381,8 @@ static int hw_fence_gunyah_share_mem(struct hw_fence_driver_data *drv_data, HWFNC_ERR("%s: gh_rm_mem_share failed addr=%x size=%u err=%d\n", __func__, drv_data->res.start, drv_data->size, ret); /* Attempt to give resource back to HLOS */ - hyp_assign_phys(drv_data->res.start, resource_size(&drv_data->res), - dst_vmlist, 2, - src_vmlist, src_perms, 1); + qcom_scm_assign_mem(drv_data->res.start, resource_size(&drv_data->res), + &dstvmids, src_vmlist, ARRAY_SIZE(src_vmlist)); ret = -EPROBE_DEFER; } From 1db686776229c73c2fe7158e4b5643c268b24a9d Mon Sep 17 00:00:00 2001 From: Grace An Date: Thu, 5 Jan 2023 16:05:37 -0800 Subject: [PATCH 050/166] 
mm-drivers: hw_fence: change memory mapping of hwfence shared memory Currently, carved-out memory region is mapped as IO. Change mapping to normal memory. Change-Id: I1eca1067e30e2a6e39969c003dcce9ea0f9c47fd Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_utils.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 2ae198fbfd..c2d598ad27 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -471,7 +471,7 @@ int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *drv_data) return -EINVAL; } - drv_data->io_mem_base = devm_ioremap(dev, drv_data->res.start, + drv_data->io_mem_base = devm_ioremap_wc(dev, drv_data->res.start, resource_size(&drv_data->res)); if (!drv_data->io_mem_base) { HWFNC_ERR("ioremap failed!\n"); From 0219a76630381851656354bdf9640eb0b8f29829 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Tue, 24 Jan 2023 11:02:46 -0800 Subject: [PATCH 051/166] mm-drivers: hw_fence: silently fail registration when feature disabled Current hw-fencing feature is disabled by default through kernel command line argument, therefore it is expected that clients receive an error when trying to register a client while feature is disabled. This change silence any print error messages during the clients registration when feature is disabled. 
Change-Id: Ie57adb52a975f9541e485039a582407cf21c11cd Signed-off-by: Ingrid Gallardo --- hw_fence/src/msm_hw_fence.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index dcbe4cd80c..424c84662b 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -24,6 +24,9 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, enum hw_fence_client_id client_id; int ret; + if (!hw_fence_driver_enable) + return ERR_PTR(-ENODEV); + HWFNC_DBG_H("++ client_id_ext:%d\n", client_id_ext); if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { From e443a11c833a826142d45035fd4bb4656fbfc45a Mon Sep 17 00:00:00 2001 From: Grace An Date: Thu, 9 Feb 2023 11:46:49 -0800 Subject: [PATCH 052/166] mm-drivers: hw_fence: fix ioctl support for ipcc signaling Currently, the ioctl to trigger ipcc signals uses the client virtual id as the tx client and the client physical id as the rx client. This should be reversed to correctly perform ipcc signaling. Change-Id: I61e7ec0e4bfd63f2d7e1cd1dd4e62dd4f6a82143 Signed-off-by: Grace An --- hw_fence/src/hw_fence_ioctl.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hw_fence/src/hw_fence_ioctl.c b/hw_fence/src/hw_fence_ioctl.c index 379dbf971d..7c5b141faf 100644 --- a/hw_fence/src/hw_fence_ioctl.c +++ b/hw_fence/src/hw_fence_ioctl.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include @@ -542,8 +542,8 @@ static long hw_sync_ioctl_fence_signal(struct hw_sync_obj *obj, unsigned long ar if (signal_id < 0) return -EINVAL; - tx_client = hw_fence_client->ipc_client_vid; - rx_client = hw_fence_client->ipc_client_pid; + tx_client = hw_fence_client->ipc_client_pid; + rx_client = hw_fence_client->ipc_client_vid; ret = msm_hw_fence_trigger_signal(obj->client_handle, tx_client, rx_client, signal_id); if (ret) { HWFNC_ERR("hw fence trigger signal has failed\n"); From 90268c94ab312403ba64a443df9040c525290954 Mon Sep 17 00:00:00 2001 From: Grace An Date: Fri, 10 Feb 2023 12:16:09 -0800 Subject: [PATCH 053/166] mm-driver: hw_fence: resolve compilation failure with data types Latest update to qcom_scm_assign_mem API changed input data types. Change data types in HW Fence Driver to ensure compatibility with newest API. Change-Id: Ia25bb9e129cf67ec99e18c60407ac997cf0d6e3f Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_utils.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 33ab483f6b..fa407134fb 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include "hw_fence_drv_priv.h" @@ -343,12 +344,17 @@ static int hw_fence_gunyah_share_mem(struct hw_fence_driver_data *drv_data, struct qcom_scm_vmperm src_vmlist[] = {{self, PERM_READ | PERM_WRITE | PERM_EXEC}}; struct qcom_scm_vmperm dst_vmlist[] = {{self, PERM_READ | PERM_WRITE}, {peer, PERM_READ | PERM_WRITE}}; - int srcvmids = BIT(src_vmlist[0].vmid); - int dstvmids = BIT(dst_vmlist[0].vmid) | BIT(dst_vmlist[1].vmid); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)) + u64 srcvmids, dstvmids; +#else + unsigned int srcvmids, dstvmids; +#endif struct gh_acl_desc *acl; struct gh_sgl_desc *sgl; int ret; + srcvmids = BIT(src_vmlist[0].vmid); + dstvmids = BIT(dst_vmlist[0].vmid) | 
BIT(dst_vmlist[1].vmid); ret = qcom_scm_assign_mem(drv_data->res.start, resource_size(&drv_data->res), &srcvmids, dst_vmlist, ARRAY_SIZE(dst_vmlist)); if (ret) { From 4e5524c85fdf61b768b54646ac161b84074e5b44 Mon Sep 17 00:00:00 2001 From: Grace An Date: Mon, 9 Jan 2023 15:22:04 -0800 Subject: [PATCH 054/166] mm-drivers: hw_fence: trigger signal for validation signaled fences When validation clients register to wait on already signaled fences, the hw fence driver must signal the client wait and wake up waiting validation clients. Change-Id: I3e0f7abfbb055d8e5fbb5afd5fc8b88991c95aee Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 4 +-- hw_fence/src/hw_fence_drv_ipc.c | 44 ++++++++++++++-------------- hw_fence/src/hw_fence_drv_priv.c | 8 ++++- 3 files changed, 31 insertions(+), 25 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 139d9288da..9ed047208b 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef __HW_FENCE_DRV_INTERNAL_H @@ -108,7 +108,7 @@ enum hw_fence_loopback_id { HW_FENCE_LOOPBACK_DPU_CTL_5, HW_FENCE_LOOPBACK_GFX_CTX_0, #if IS_ENABLED(CONFIG_DEBUG_FS) - HW_FENCE_LOOPBACK_VAL_0, + HW_FENCE_LOOPBACK_VAL_0 = HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_LOOPBACK_VAL_1, HW_FENCE_LOOPBACK_VAL_2, HW_FENCE_LOOPBACK_VAL_3, diff --git a/hw_fence/src/hw_fence_drv_ipc.c b/hw_fence/src/hw_fence_drv_ipc.c index 48317cafa8..a2289fc8ee 100644 --- a/hw_fence/src/hw_fence_drv_ipc.c +++ b/hw_fence/src/hw_fence_drv_ipc.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include @@ -53,13 +53,13 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_no_dpu[HW_FENCE_IPC_MAP_ {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 18, false, true},/*ctl4*/ {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 19, false, true},/*ctl5*/ #if IS_ENABLED(CONFIG_DEBUG_FS) - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 21, true, true},/* val0*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 22, true, true},/* val1*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 23, true, true},/* val2*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 24, true, true},/* val3*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25, true, true},/* val4*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26, true, true},/* val5*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27, true, true},/* val6*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 21, true, false},/*val0*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 22, true, false},/*val1*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 23, true, false},/*val2*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 24, true, false},/*val3*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25, true, false},/*val4*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26, true, false},/*val5*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27, true, false},/*val6*/ #endif /* CONFIG_DEBUG_FS */ }; @@ -81,13 +81,13 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_IPC_MAP_MAX] = {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 4, false, true},/* ctl4 */ {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 5, false, true},/* ctl5 */ #if 
IS_ENABLED(CONFIG_DEBUG_FS) - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 21, true, true},/* val0*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 22, true, true},/* val1*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 23, true, true},/* val2*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 24, true, true},/* val3*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25, true, true},/* val4*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26, true, true},/* val5*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27, true, true},/* val6*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 21, true, false},/*val0*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 22, true, false},/*val1*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 23, true, false},/*val2*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 24, true, false},/*val3*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25, true, false},/*val4*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26, true, false},/*val5*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27, true, false},/*val6*/ #else {0, 0, 0, false, false}, /* val0 */ {0, 0, 0, false, false}, /* val1 */ @@ -122,13 +122,13 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_v2[HW_FENCE_IPC_MAP_MAX] {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 4, false, true},/* ctl4 */ {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 5, false, true},/* ctl5 */ #if IS_ENABLED(CONFIG_DEBUG_FS) - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 21, true, true},/* val0*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 22, true, true},/* val1*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, 
HW_FENCE_IPC_CLIENT_ID_APPS_PID, 23, true, true},/* val2*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 24, true, true},/* val3*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 25, true, true},/* val4*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 26, true, true},/* val5*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 27, true, true},/* val6*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 21, true, false},/*val0*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 22, true, false},/*val1*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 23, true, false},/*val2*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 24, true, false},/*val3*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 25, true, false},/*val4*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 26, true, false},/*val5*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 27, true, false},/*val6*/ #else {0, 0, 0, false, false}, /* val0 */ {0, 0, 0, false, false}, /* val1 */ diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index ed7ce14bda..f69fd408e6 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include @@ -1123,6 +1123,12 @@ static void _fence_ctl_signal(struct hw_fence_driver_data *drv_data, if (hw_fence_client->send_ipc) hw_fence_ipcc_trigger_signal(drv_data, tx_client_id, rx_client_id, hw_fence_client->ipc_signal_id); + +#if IS_ENABLED(CONFIG_DEBUG_FS) + if (hw_fence_client->client_id >= HW_FENCE_CLIENT_ID_VAL0 + && hw_fence_client->client_id <= HW_FENCE_CLIENT_ID_VAL6) + process_validation_client_loopback(drv_data, hw_fence_client->client_id); +#endif /* CONFIG_DEBUG_FS */ } static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data, From 5eec9ba76cc7e41de27d40e3422f5a6184e1c9d6 Mon Sep 17 00:00:00 2001 From: Grace An Date: Fri, 3 Mar 2023 15:38:06 -0800 Subject: [PATCH 055/166] mm-drivers: hw_fence: add header file for translation to synx api Add header file for synx translation layer in hwfence driver. Change-Id: Ie0ec426292cda180159d8572a3ace474804d3af5 Signed-off-by: Grace An --- .../include/msm_hw_fence_synx_translation.h | 220 ++++++++++++++++++ 1 file changed, 220 insertions(+) create mode 100644 hw_fence/include/msm_hw_fence_synx_translation.h diff --git a/hw_fence/include/msm_hw_fence_synx_translation.h b/hw_fence/include/msm_hw_fence_synx_translation.h new file mode 100644 index 0000000000..1235d7639e --- /dev/null +++ b/hw_fence/include/msm_hw_fence_synx_translation.h @@ -0,0 +1,220 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef __MSM_HW_FENCE_SYNX_TRANSLATION_H +#define __MSM_HW_FENCE_SYNX_TRANSLATION_H + +#include + +#define SYNX_HW_FENCE_CLIENT_START 1024 +#define SYNX_HW_FENCE_CLIENT_END 4096 +#define SYNX_MAX_SIGNAL_PER_CLIENT 64 + +extern bool hw_fence_driver_enable; + +/** + * enum synx_client_id : Unique identifier of the supported clients + * + * @SYNX_CLIENT_HW_FENCE_GFX_CTX0 : HW Fence GFX Client 0 + * @SYNX_CLIENT_HW_FENCE_IPE_CTX0 : HW Fence IPE Client 0 + * @SYNX_CLIENT_HW_FENCE_VID_CTX0 : HW Fence Video Client 0 + * @SYNX_CLIENT_HW_FENCE_DPU0_CTL0 : HW Fence DPU0 Client 0 + * @SYNX_CLIENT_HW_FENCE_DPU1_CTL0 : HW Fence DPU1 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE0_CTX0 : HW Fence IFE0 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE1_CTX0 : HW Fence IFE1 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE2_CTX0 : HW Fence IFE2 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE3_CTX0 : HW Fence IFE3 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE4_CTX0 : HW Fence IFE4 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE5_CTX0 : HW Fence IFE5 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE6_CTX0 : HW Fence IFE6 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE7_CTX0 : HW Fence IFE7 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE8_CTX0 : HW Fence IFE8 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE9_CTX0 : HW Fence IFE9 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE10_CTX0 : HW Fence IFE10 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE11_CTX0 : HW Fence IFE11 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE12_CTX0 : HW Fence IFE12 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE13_CTX0 : HW Fence IFE13 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE14_CTX0 : HW Fence IFE14 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE15_CTX0 : HW Fence IFE15 Client 0 + */ +enum synx_hwfence_client_id { + SYNX_CLIENT_HW_FENCE_GFX_CTX0 = SYNX_HW_FENCE_CLIENT_START, + SYNX_CLIENT_HW_FENCE_IPE_CTX0 = SYNX_CLIENT_HW_FENCE_GFX_CTX0 + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_VID_CTX0 = SYNX_CLIENT_HW_FENCE_IPE_CTX0 + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_DPU0_CTL0 = SYNX_CLIENT_HW_FENCE_VID_CTX0 + 
SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_DPU1_CTL0 = SYNX_CLIENT_HW_FENCE_DPU0_CTL0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE0_CTX0 = SYNX_CLIENT_HW_FENCE_DPU1_CTL0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE1_CTX0 = SYNX_CLIENT_HW_FENCE_IFE0_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE2_CTX0 = SYNX_CLIENT_HW_FENCE_IFE1_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE3_CTX0 = SYNX_CLIENT_HW_FENCE_IFE2_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE4_CTX0 = SYNX_CLIENT_HW_FENCE_IFE3_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE5_CTX0 = SYNX_CLIENT_HW_FENCE_IFE4_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE6_CTX0 = SYNX_CLIENT_HW_FENCE_IFE5_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE7_CTX0 = SYNX_CLIENT_HW_FENCE_IFE6_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE8_CTX0 = SYNX_CLIENT_HW_FENCE_IFE7_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE9_CTX0 = SYNX_CLIENT_HW_FENCE_IFE8_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE10_CTX0 = SYNX_CLIENT_HW_FENCE_IFE9_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE11_CTX0 = SYNX_CLIENT_HW_FENCE_IFE10_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE12_CTX0 = SYNX_CLIENT_HW_FENCE_IFE11_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE13_CTX0 = SYNX_CLIENT_HW_FENCE_IFE12_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE14_CTX0 = SYNX_CLIENT_HW_FENCE_IFE13_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE15_CTX0 = SYNX_CLIENT_HW_FENCE_IFE14_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_MAX = SYNX_HW_FENCE_CLIENT_END, +}; + +#if IS_ENABLED(CONFIG_QTI_HW_FENCE) +/** + * synx_hwfence_initialize - Initializes a new client session + * + * @param params : Pointer to session init params + * + * @return Client session pointer on success. 
NULL or error in case of failure. + */ +struct synx_session *synx_hwfence_initialize(struct synx_initialization_params *params); + +/** + * synx_hwfence_uninitialize - Destroys the client session + * + * @param session : Session ptr (returned from synx_initialize) + * + * @return Status of operation. SYNX_SUCCESS in case of success. + */ +int synx_hwfence_uninitialize(struct synx_session *session); + +/** + * synx_hwfence_create - Creates a synx object + * + * Creates a new synx obj and returns the handle to client. + * + * @param session : Session ptr (returned from synx_initialize) + * @param params : Pointer to create params + * + * @return Status of operation. SYNX_SUCCESS in case of success. + * -SYNX_INVALID will be returned if params were invalid. + * -SYNX_NOMEM will be returned if the kernel can't allocate space for + * synx object. + */ +int synx_hwfence_create(struct synx_session *session, struct synx_create_params *params); + +/** + * synx_hwfence_release - Release the synx object + * + * @param session : Session ptr (returned from synx_initialize) + * @param h_synx : Synx object handle to be destroyed + * + * @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise. + */ +int synx_hwfence_release(struct synx_session *session, u32 h_synx); + +/** + * synx_hwfence_signal - Signals a synx object with the status argument. + * + * This function will signal the synx object referenced by h_synx + * and invoke any external binding synx objs. + * The status parameter will indicate whether the entity + * performing the signaling wants to convey an error case or a success case. + * + * @param session : Session ptr (returned from synx_initialize) + * @param h_synx : Synx object handle + * @param status : Status of signaling. + * Clients can send custom signaling status + * beyond SYNX_STATE_SIGNALED_MAX. + * + * @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise. 
+ */ +int synx_hwfence_signal(struct synx_session *session, u32 h_synx, enum synx_signal_status status); + +/** + * synx_hwfence_recover - Recover any possible handle leaks + * + * Function should be called on HW hang/reset to + * recover the Synx handles shared. This cleans up + * Synx handles held by the rest HW, and avoids + * potential resource leaks. + * + * Function does not destroy the session, but only + * recover synx handles belonging to the session. + * Synx session would still be active and clients + * need to destroy the session explicitly through + * synx_uninitialize API. + * + * @param id : Client ID of core to recover + * + * @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise. + */ +int synx_hwfence_recover(enum synx_client_id id); + +/** + * synx_hwfence_import - Imports (looks up) synx object from given handle/fence + * + * Import subscribes the client session for notification on signal + * of handles/fences. + * + * @param session : Session ptr (returned from synx_initialize) + * @param params : Pointer to import params + * + * @return SYNX_SUCCESS upon success, -SYNX_INVAL if synx object is bad state + */ +int synx_hwfence_import(struct synx_session *session, struct synx_import_params *params); + +#else /* CONFIG_QTI_HW_FENCE */ +static inline struct synx_session *synx_hwfence_initialize( + struct synx_initialization_params *params) +{ + return ERR_PTR(-SYNX_INVALID); +} + +static inline int synx_hwfence_uninitialize(struct synx_session *session) +{ + return -SYNX_INVALID; +} + +static inline int synx_hwfence_create(struct synx_session *session, + struct synx_create_params *params) +{ + return -SYNX_INVALID; +} + +static inline int synx_hwfence_release(struct synx_session *session, u32 h_synx) +{ + return -SYNX_INVALID; +} + +static inline int synx_hwfence_signal(struct synx_session *session, u32 h_synx, + enum synx_signal_status status) +{ + return -SYNX_INVALID; +} + +static inline int synx_hwfence_recover(enum 
synx_client_id id) +{ + return -SYNX_INVALID; +} + +static inline int synx_hwfence_import(struct synx_session *session, + struct synx_import_params *params) +{ + return -SYNX_INVALID; +} + +#endif /* CONFIG_QTI_HW_FENCE */ +#endif /* __MSM_HW_FENCE_SYNX_TRANSLATION_H */ From 41b11c1d09b93c0b8e4687dd44a99468552f2489 Mon Sep 17 00:00:00 2001 From: Grace An Date: Thu, 8 Dec 2022 15:40:49 -0800 Subject: [PATCH 056/166] mm-drivers: hw_fence: add implementation of translation to synx API Add implementation to translate the msm_hw_fence API into synx API. Change-Id: I5d0b7afcc297a4e3c8ec4ed9867831b5d2dfc3af Signed-off-by: Grace An --- hw_fence/Kbuild | 7 +- hw_fence/src/msm_hw_fence.c | 2 +- hw_fence/src/msm_hw_fence_synx_translation.c | 332 +++++++++++++++++++ 3 files changed, 338 insertions(+), 3 deletions(-) create mode 100644 hw_fence/src/msm_hw_fence_synx_translation.c diff --git a/hw_fence/Kbuild b/hw_fence/Kbuild index 2cf74d291b..55334e8b65 100644 --- a/hw_fence/Kbuild +++ b/hw_fence/Kbuild @@ -3,7 +3,9 @@ KDIR := $(TOP)/kernel_platform/msm-kernel include $(MSM_HW_FENCE_ROOT)/config/kalamammdrivers.conf LINUXINCLUDE += -include $(MSM_HW_FENCE_ROOT)/config/kalamammdriversconf.h \ - -I$(MSM_HW_FENCE_ROOT)hw_fence/include/ + -I$(MSM_HW_FENCE_ROOT)hw_fence/include/ \ + -I$(MSM_HW_FENCE_ROOT)/../synx-kernel/msm/synx/ \ + -I$(MSM_HW_FENCE_ROOT)/../synx-kernel/include/uapi/synx/media/ ifdef CONFIG_QTI_HW_FENCE obj-m += msm_hw_fence.o @@ -12,7 +14,8 @@ msm_hw_fence-y := src/msm_hw_fence.o \ src/hw_fence_drv_priv.o \ src/hw_fence_drv_utils.o \ src/hw_fence_drv_debug.o \ - src/hw_fence_drv_ipc.o + src/hw_fence_drv_ipc.o \ + src/msm_hw_fence_synx_translation.o msm_hw_fence-$(CONFIG_DEBUG_FS) += src/hw_fence_ioctl.o diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index c5531727f9..9904d934e6 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -15,7 +15,7 @@ #include "hw_fence_drv_ipc.h" struct hw_fence_driver_data 
*hw_fence_drv_data; -static bool hw_fence_driver_enable; +bool hw_fence_driver_enable; void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, struct msm_hw_fence_mem_addr *mem_descriptor) diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c new file mode 100644 index 0000000000..f35bfcd488 --- /dev/null +++ b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -0,0 +1,332 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include "msm_hw_fence_synx_translation.h" +#include "hw_fence_drv_priv.h" +#include "hw_fence_drv_debug.h" + +/** + * MAX_SUPPORTED_DPU0: + * Maximum number of dpu clients supported + */ +#define MAX_SUPPORTED_DPU0 (HW_FENCE_CLIENT_ID_CTL5 - HW_FENCE_CLIENT_ID_CTL0) + +static int to_synx_status(int hw_fence_status_code) +{ + int synx_status_code; + + switch (hw_fence_status_code) { + case 0: + synx_status_code = SYNX_SUCCESS; + break; + case -ENOMEM: + synx_status_code = -SYNX_NOMEM; + break; + case -EPERM: + synx_status_code = -SYNX_NOPERM; + break; + case -ETIMEDOUT: + synx_status_code = -SYNX_TIMEOUT; + break; + case -EALREADY: + synx_status_code = -SYNX_ALREADY; + break; + case -ENOENT: + synx_status_code = -SYNX_NOENT; + break; + case -EINVAL: + synx_status_code = -SYNX_INVALID; + break; + case -EBUSY: + synx_status_code = -SYNX_BUSY; + break; + default: + synx_status_code = hw_fence_status_code; + break; + } + + return synx_status_code; +} + +static enum hw_fence_client_id _get_hw_fence_client_id(enum synx_client_id synx_client_id) +{ + enum hw_fence_client_id hw_fence_client_id; + + switch ((int)synx_client_id) { + case SYNX_CLIENT_HW_FENCE_GFX_CTX0: + hw_fence_client_id = HW_FENCE_CLIENT_ID_CTX0; + break; + case SYNX_CLIENT_HW_FENCE_IPE_CTX0 ... 
SYNX_CLIENT_HW_FENCE_IPE_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT - 1: + hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_IPE_CTX0 + + HW_FENCE_CLIENT_ID_IPE; + break; + case SYNX_CLIENT_HW_FENCE_VID_CTX0 ... SYNX_CLIENT_HW_FENCE_VID_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT - 1: + hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_VID_CTX0 + + HW_FENCE_CLIENT_ID_VPU; + break; + case SYNX_CLIENT_HW_FENCE_DPU0_CTL0 ... SYNX_CLIENT_HW_FENCE_DPU0_CTL0 + MAX_SUPPORTED_DPU0: + hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_DPU0_CTL0 + + HW_FENCE_CLIENT_ID_CTL0; + break; + case SYNX_CLIENT_HW_FENCE_IFE0_CTX0 ... SYNX_CLIENT_HW_FENCE_IFE7_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT - 1: + hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_IFE0_CTX0 + + HW_FENCE_CLIENT_ID_IFE0; + break; + default: + HWFNC_ERR("Unsupported hw-fence client for synx_id:%d\n", synx_client_id); + hw_fence_client_id = HW_FENCE_CLIENT_MAX; + break; + } + + return hw_fence_client_id; +} + +static bool is_hw_fence_client(enum synx_client_id synx_client_id) +{ + return synx_client_id >= SYNX_HW_FENCE_CLIENT_START + && synx_client_id < SYNX_HW_FENCE_CLIENT_END; +} + +struct synx_session *synx_hwfence_initialize(struct synx_initialization_params *params) +{ + struct synx_session *session = NULL; + enum hw_fence_client_id client_id; + void *client_handle; + + if (!hw_fence_driver_enable) + return ERR_PTR(-SYNX_INVALID); + + if (IS_ERR_OR_NULL(params) || IS_ERR_OR_NULL(params->ptr)) { + HWFNC_ERR("invalid params:0x%pK params->ptr:0x%pK\n", params, + IS_ERR_OR_NULL(params) ? 
NULL : params->ptr); + return ERR_PTR(-SYNX_INVALID); + } + + client_id = _get_hw_fence_client_id(params->id); + if (!is_hw_fence_client(params->id) || client_id == HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("Initializing session for invalid synx_id:%d\n", params->id); + return ERR_PTR(-SYNX_INVALID); + } + + session = kzalloc(sizeof(struct synx_session), GFP_KERNEL); + if (!session) + return ERR_PTR(-SYNX_NOMEM); + + client_handle = msm_hw_fence_register(client_id, + (struct msm_hw_fence_mem_addr *)params->ptr); + if (IS_ERR_OR_NULL(client_handle)) { + kfree(session); + HWFNC_ERR("failed to initialize synx_id:%d ret:%d\n", params->id, + PTR_ERR(client_handle)); + return ERR_PTR(to_synx_status(PTR_ERR(client_handle))); + } + session->client = client_handle; + session->type = params->id; + HWFNC_DBG_INIT("initialized session synx_id:%d hw_fence_id:%d\n", params->id, client_id); + + return session; +} +EXPORT_SYMBOL(synx_hwfence_initialize); + +int synx_hwfence_uninitialize(struct synx_session *session) +{ + int ret; + + if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type)) { + HWFNC_ERR("invalid session:0x%pK synx_id:%d\n", session, + IS_ERR_OR_NULL(session) ? -1 : session->type); + return -SYNX_INVALID; + } + + ret = msm_hw_fence_deregister(session->client); + if (ret) + HWFNC_ERR("Failed to deregister synx_id:%d ret:%d\n", session->type, ret); + else + kfree(session); + + return to_synx_status(ret); +} +EXPORT_SYMBOL(synx_hwfence_uninitialize); + +int synx_hwfence_create(struct synx_session *session, struct synx_create_params *params) +{ + int ret = 0; + struct msm_hw_fence_create_params hwfence_params; + u64 handle; + + if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type) || + IS_ERR_OR_NULL(params)) { + HWFNC_ERR("invalid session:0x%pK synx_id:%d params:0x%pK\n", session, + IS_ERR_OR_NULL(session) ? 
-1 : session->type, params); + return -SYNX_INVALID; + } + + if (IS_ERR_OR_NULL(params->h_synx) || (params->flags != SYNX_CREATE_DMA_FENCE) || + IS_ERR_OR_NULL(params->fence)) { + HWFNC_ERR("synx_id:%d invalid create params h_synx:0x%pK flags:0x%x fence:0x%pK\n", + session->type, params->h_synx, params->flags, params->fence); + return -SYNX_INVALID; + } + + hwfence_params.fence = params->fence; + hwfence_params.handle = &handle; + ret = msm_hw_fence_create(session->client, &hwfence_params); + if (ret) { + HWFNC_ERR("synx_id:%d failed create fence:0x%pK flags:0x%x ret:%d\n", session->type, + params->fence, params->flags, ret); + return to_synx_status(ret); + } + if (handle > U32_MAX) { + HWFNC_ERR("synx_id:%d fence handle:%llu would overflow h_synx\n", session->type, + handle); + msm_hw_fence_destroy_with_handle(session->client, handle); + return -SYNX_INVALID; + } + *params->h_synx = handle; + + return SYNX_SUCCESS; +} +EXPORT_SYMBOL(synx_hwfence_create); + +int synx_hwfence_release(struct synx_session *session, u32 h_synx) +{ + int ret; + + if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type)) { + HWFNC_ERR("invalid session:0x%pK synx_id:%d\n", session, + IS_ERR_OR_NULL(session) ? -1 : session->type); + return -SYNX_INVALID; + } + + ret = msm_hw_fence_destroy_with_handle(session->client, h_synx); + if (ret) + HWFNC_ERR("synx_id:%d failed to destroy fence h_synx:%u ret:%d\n", session->type, + h_synx, ret); + + return to_synx_status(ret); +} +EXPORT_SYMBOL(synx_hwfence_release); + +int synx_hwfence_signal(struct synx_session *session, u32 h_synx, enum synx_signal_status status) +{ + int ret; + + if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type)) { + HWFNC_ERR("invalid session:0x%pK synx_id:%d\n", session, + IS_ERR_OR_NULL(session) ? 
-1 : session->type); + return -SYNX_INVALID; + } + + ret = msm_hw_fence_update_txq(session->client, h_synx, 0, (u32)status); + if (ret) + HWFNC_ERR("synx_id:%d failed to signal fence h_synx:%u status:%d ret:%d\n", + session->type, h_synx, status, ret); + + return to_synx_status(ret); +} +EXPORT_SYMBOL(synx_hwfence_signal); + +int synx_hwfence_recover(enum synx_client_id id) +{ + int ret; + + if (!is_hw_fence_client(id)) { + HWFNC_ERR("invalid synx_id:%d\n", id); + return -SYNX_INVALID; + } + + ret = msm_hw_fence_reset_client_by_id(_get_hw_fence_client_id(id), + MSM_HW_FENCE_RESET_WITHOUT_DESTROY); + if (ret) + HWFNC_ERR("synx_id:%d failed to recover ret:%d\n", id, ret); + + return to_synx_status(ret); +} +EXPORT_SYMBOL(synx_hwfence_recover); + +static int synx_hwfence_import_indv(void *client, struct synx_import_indv_params *params) +{ + u64 handle; + int ret; + + if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(params) || + IS_ERR_OR_NULL(params->new_h_synx) || + (params->flags != SYNX_IMPORT_DMA_FENCE) || IS_ERR_OR_NULL(params->fence)) { + HWFNC_ERR("invalid client:0x%pK params:0x%pK h_synx:0x%pK flags:0x%x fence:0x%pK\n", + client, params, IS_ERR_OR_NULL(params) ? NULL : params->new_h_synx, + IS_ERR_OR_NULL(params) ? 0 : params->flags, + IS_ERR_OR_NULL(params) ? 
NULL : params->fence); + return -SYNX_INVALID; + } + + ret = msm_hw_fence_wait_update_v2(client, (struct dma_fence **)¶ms->fence, &handle, + NULL, 1, true); + if (ret) { + HWFNC_ERR("failed to import fence:0x%pK flags:0x%x ret:%d\n", params->fence, + params->flags, ret); + return to_synx_status(ret); + } + if (handle > U32_MAX) { + HWFNC_ERR("fence handle:%llu would overflow new_h_synx\n", handle); + msm_hw_fence_wait_update_v2(client, (struct dma_fence **)¶ms->fence, &handle, + NULL, 1, false); + return -SYNX_INVALID; + } + *params->new_h_synx = handle; + + return SYNX_SUCCESS; +} + +static int synx_hwfence_import_arr(void *client, struct synx_import_arr_params *params) +{ + int i, ret; + + if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(params) || !params->num_fences) { + HWFNC_ERR("invalid import arr client:0x%pK params:0x%pK num_fences:%u\n", client, + params, IS_ERR_OR_NULL(params) ? -1 : params->num_fences); + return -SYNX_INVALID; + } + + for (i = 0; i < params->num_fences; i++) { + ret = synx_hwfence_import_indv(client, ¶ms->list[i]); + if (ret) { + HWFNC_ERR("importing fence[%u] 0x%pK failed ret:%d\n", i, + params->list[i].fence, ret); + return ret; + } + } + + return SYNX_SUCCESS; +} + +int synx_hwfence_import(struct synx_session *session, struct synx_import_params *params) +{ + int ret; + + if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type) + || IS_ERR_OR_NULL(params)) { + HWFNC_ERR("invalid session:0x%pK synx_id:%d params:0x%pK\n", session, + IS_ERR_OR_NULL(session) ? -1 : session->type, params); + return -SYNX_INVALID; + } + + if (params->type == SYNX_IMPORT_ARR_PARAMS) + ret = synx_hwfence_import_arr(session->client, ¶ms->arr); + else + ret = synx_hwfence_import_indv(session->client, ¶ms->indv); + + if (ret) + HWFNC_ERR("synx_id:%d failed to import type:%s fences ret:%d\n", session->type, + (params->type == SYNX_IMPORT_ARR_PARAMS) ? 
"arr" : "indv", ret); + + return ret; +} +EXPORT_SYMBOL(synx_hwfence_import); From 88f51cfe0423457f728049cd7c09c939b9e5734c Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 7 Feb 2023 10:26:37 -0800 Subject: [PATCH 057/166] mm-drivers: hw_fence: add support for multiple ipe and vpu clients Add support for signal-based reservation of hw fence client ids for ipe and vpu clients. Change-Id: I4e4a835424756c6e5fa8d5c2d340dfadc4d11541 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 10 ++++++---- hw_fence/include/hw_fence_drv_utils.h | 12 ++++++------ hw_fence/src/hw_fence_drv_priv.c | 28 ++++++++++++++------------- hw_fence/src/hw_fence_drv_utils.c | 10 ++-------- hw_fence/src/msm_hw_fence.c | 6 +++--- 5 files changed, 32 insertions(+), 34 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 9ed047208b..d17643c4d6 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -124,10 +124,12 @@ enum hw_fence_loopback_id { /** * enum hw_fence_client_data_id - Enum with the clients having client_data, an optional * parameter passed from the waiting client and returned - * to it upon fence signaling - * @HW_FENCE_CLIENT_DATA_ID_CTX0: GFX Client. - * @HW_FENCE_CLIENT_DATA_ID_IPE: IPE Client. - * @HW_FENCE_CLIENT_DATA_ID_VPU: VPU Client. + * to it upon fence signaling. Only the first HW Fence + * Client for non-VAL clients (e.g. GFX, IPE, VPU) have + * client_data. + * @HW_FENCE_CLIENT_DATA_ID_CTX0: GFX Client 0. + * @HW_FENCE_CLIENT_DATA_ID_IPE: IPE Client 0. + * @HW_FENCE_CLIENT_DATA_ID_VPU: VPU Client 0. * @HW_FENCE_CLIENT_DATA_ID_VAL0: Debug validation client 0. * @HW_FENCE_CLIENT_DATA_ID_VAL1: Debug validation client 1. 
* @HW_FENCE_MAX_CLIENTS_WITH_DATA: Max number of clients with data, also indicates an diff --git a/hw_fence/include/hw_fence_drv_utils.h b/hw_fence/include/hw_fence_drv_utils.h index 756f07b2bf..ae711d8869 100644 --- a/hw_fence/include/hw_fence_drv_utils.h +++ b/hw_fence/include/hw_fence_drv_utils.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef __HW_FENCE_DRV_UTILS_H @@ -8,21 +8,21 @@ /** * HW_FENCE_MAX_CLIENT_TYPE_STATIC: - * Total number of client types without configurable number of sub-clients (GFX, DPU, VAL, IPE, VPU) + * Total number of client types without configurable number of sub-clients (GFX, DPU, VAL) */ -#define HW_FENCE_MAX_CLIENT_TYPE_STATIC 5 +#define HW_FENCE_MAX_CLIENT_TYPE_STATIC 3 /** * HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE: - * Maximum number of client types with configurable number of sub-clients (e.g. IFE) + * Maximum number of client types with configurable number of sub-clients (e.g. IPE, VPU, IFE) */ -#define HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE 8 +#define HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE 10 /** * HW_FENCE_MAX_STATIC_CLIENTS_INDEX: * Maximum number of static clients, i.e. clients without configurable numbers of sub-clients */ -#define HW_FENCE_MAX_STATIC_CLIENTS_INDEX HW_FENCE_CLIENT_ID_IFE0 +#define HW_FENCE_MAX_STATIC_CLIENTS_INDEX HW_FENCE_CLIENT_ID_IPE /** * enum hw_fence_mem_reserve - Types of reservations for the carved-out memory. 
diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index f69fd408e6..833bc1b077 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -568,10 +568,10 @@ int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, * * NOTE: For each Client HW-Core, the client drivers might be the ones making * it's own initialization (in case that any hw-sequence must be enforced), - * however, if that is not the case, any per-client ipcc init to enable the + * however, if that is not the case, any per-client ipcc init to enable the * signaling, can go here. */ - switch ((int)hw_fence_client->client_id) { + switch ((int)hw_fence_client->client_id_ext) { case HW_FENCE_CLIENT_ID_CTX0: /* nothing to initialize for gpu client */ break; @@ -594,8 +594,8 @@ int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, case HW_FENCE_CLIENT_ID_CTL5: #ifdef HW_DPU_IPCC /* initialize ipcc signals for dpu clients */ - HWFNC_DBG_H("init_controller_signal: DPU client:%d initialized:%d\n", - hw_fence_client->client_id, drv_data->ipcc_dpu_initialized); + HWFNC_DBG_H("init_controller_signal: DPU client_id_ext:%d initialized:%d\n", + hw_fence_client->client_id_ext, drv_data->ipcc_dpu_initialized); if (!drv_data->ipcc_dpu_initialized) { drv_data->ipcc_dpu_initialized = true; @@ -604,10 +604,12 @@ int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, } #endif /* HW_DPU_IPCC */ break; - case HW_FENCE_CLIENT_ID_IPE: + case HW_FENCE_CLIENT_ID_IPE ... HW_FENCE_CLIENT_ID_IPE + + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - 1: /* nothing to initialize for IPE client */ break; - case HW_FENCE_CLIENT_ID_VPU: + case HW_FENCE_CLIENT_ID_VPU ... HW_FENCE_CLIENT_ID_VPU + + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - 1: /* nothing to initialize for VPU client */ break; case HW_FENCE_CLIENT_ID_IFE0 ... 
HW_FENCE_CLIENT_ID_IFE7 + @@ -615,7 +617,7 @@ int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, /* nothing to initialize for IFE clients */ break; default: - HWFNC_ERR("Unexpected client:%d\n", hw_fence_client->client_id); + HWFNC_ERR("Unexpected client_id_ext:%d\n", hw_fence_client->client_id_ext); ret = -EINVAL; break; } @@ -1207,10 +1209,10 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, enum hw_fence_client_data_id data_id; if (client_data) { - data_id = hw_fence_get_client_data_id(hw_fence_client->client_id); + data_id = hw_fence_get_client_data_id(hw_fence_client->client_id_ext); if (data_id >= HW_FENCE_MAX_CLIENTS_WITH_DATA) { - HWFNC_ERR("Populating non-zero client_data:%llu with invalid client:%d\n", - client_data, hw_fence_client->client_id); + HWFNC_ERR("Populating client_data:%llu with invalid client_id_ext:%d\n", + client_data, hw_fence_client->client_id_ext); return -EINVAL; } } @@ -1349,9 +1351,9 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, enum hw_fence_client_data_id data_id; if (client_data) { - data_id = hw_fence_get_client_data_id(hw_fence_client->client_id); + data_id = hw_fence_get_client_data_id(hw_fence_client->client_id_ext); if (data_id >= HW_FENCE_MAX_CLIENTS_WITH_DATA) { - HWFNC_ERR("Populating non-zero client_data:%llu with invalid client:%d\n", + HWFNC_ERR("Populating client_data:%llu with invalid client_id_ext:%d\n", client_data, hw_fence_client->client_id); return -EINVAL; } @@ -1424,7 +1426,7 @@ static void _signal_all_wait_clients(struct hw_fence_driver_data *drv_data, for (wait_client_id = 0; wait_client_id <= drv_data->rxq_clients_num; wait_client_id++) { if (hw_fence->wait_client_mask & BIT(wait_client_id)) { hw_fence_wait_client = drv_data->clients[wait_client_id]; - data_id = hw_fence_get_client_data_id(wait_client_id); + data_id = hw_fence_get_client_data_id(hw_fence_wait_client->client_id_ext); if (data_id < HW_FENCE_MAX_CLIENTS_WITH_DATA) 
client_data = hw_fence->client_data[data_id]; diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index fa407134fb..162e962dbc 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -30,12 +30,6 @@ #define HW_FENCE_MAX_CLIENT_TYPE (HW_FENCE_MAX_CLIENT_TYPE_STATIC + \ HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE) -/** - * HW_FENCE_MAX_STATIC_CLIENTS_INDEX: - * Maximum number of static clients, i.e. clients without configurable numbers of sub-clients - */ -#define HW_FENCE_MAX_STATIC_CLIENTS_INDEX HW_FENCE_CLIENT_ID_IFE0 - /** * HW_FENCE_MIN_RXQ_CLIENTS: * Minimum number of static hw fence clients with rxq @@ -52,8 +46,8 @@ #define HW_FENCE_CLIENT_TYPE_MAX_GPU 1 #define HW_FENCE_CLIENT_TYPE_MAX_DPU 6 #define HW_FENCE_CLIENT_TYPE_MAX_VAL 7 -#define HW_FENCE_CLIENT_TYPE_MAX_IPE 1 -#define HW_FENCE_CLIENT_TYPE_MAX_VPU 1 +#define HW_FENCE_CLIENT_TYPE_MAX_IPE 32 +#define HW_FENCE_CLIENT_TYPE_MAX_VPU 32 #define HW_FENCE_CLIENT_TYPE_MAX_IFE 32 /** diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index c5531727f9..f1d1b3be2f 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -313,10 +313,10 @@ int msm_hw_fence_wait_update_v2(void *client_handle, } hw_fence_client = (struct msm_hw_fence_client *)client_handle; - data_id = hw_fence_get_client_data_id(hw_fence_client->client_id); + data_id = hw_fence_get_client_data_id(hw_fence_client->client_id_ext); if (client_data_list && data_id >= HW_FENCE_MAX_CLIENTS_WITH_DATA) { - HWFNC_ERR("Populating non-NULL client_data_list with unsupported client id:%d\n", - hw_fence_client->client_id); + HWFNC_ERR("Populating non-NULL client_data_list with invalid client_id_ext:%d\n", + hw_fence_client->client_id_ext); return -EINVAL; } From cf8ab93a12abcefefc94190b1c9ca9c990e6fd22 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 28 Feb 2023 17:22:07 -0800 Subject: [PATCH 058/166] mm-drivers: hw_fence: fix edge cases for hwfence ioctls 
Clear doorbell mask for val client loopbacks. Create up to full number of possible fences for create_fence_array. Wait full amount of time for fence in ioctl. Change-Id: Ic0f2553f345932511fa9669b5383d8bfdaa23459 Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_debug.c | 3 ++- hw_fence/src/hw_fence_drv_utils.c | 12 ++++++------ hw_fence/src/hw_fence_ioctl.c | 18 ++++++++++++++---- 3 files changed, 22 insertions(+), 11 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index 44083141a9..c3c409a3ab 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -907,6 +907,7 @@ int process_validation_client_loopback(struct hw_fence_driver_data *drv_data, if (!drv_data->clients[client_id]) { mutex_unlock(&drv_data->clients_register_lock); + HWFNC_ERR("Processing workaround for unregistered val client:%d\n", client_id); return -EINVAL; } diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index fa407134fb..a6feec9108 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -56,6 +56,12 @@ #define HW_FENCE_CLIENT_TYPE_MAX_VPU 1 #define HW_FENCE_CLIENT_TYPE_MAX_IFE 32 +/* + * Each bit in this mask represents each of the loopback clients supported in + * the enum hw_fence_loopback_id + */ +#define HW_FENCE_LOOPBACK_CLIENTS_MASK 0x7fff + /** * struct hw_fence_client_types - Table describing all supported client types, used to parse * device-tree properties related to client queue size. 
@@ -162,12 +168,6 @@ void global_atomic_store(struct hw_fence_driver_data *drv_data, uint64_t *lock, } } -/* - * Each bit in this mask represents each of the loopback clients supported in - * the enum hw_fence_loopback_id - */ -#define HW_FENCE_LOOPBACK_CLIENTS_MASK 0x7f - static inline int _process_dpu_client_loopback(struct hw_fence_driver_data *drv_data, int client_id) { diff --git a/hw_fence/src/hw_fence_ioctl.c b/hw_fence/src/hw_fence_ioctl.c index 7c5b141faf..456732d0db 100644 --- a/hw_fence/src/hw_fence_ioctl.c +++ b/hw_fence/src/hw_fence_ioctl.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include @@ -39,6 +40,8 @@ .name = #ioctl \ } +#define ktime_compare_safe(A, B) ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0)) + /** * struct hw_sync_obj - per client hw sync object. * @context: context id used to create fences. @@ -371,7 +374,7 @@ static long hw_sync_ioctl_create_fence_array(struct hw_sync_obj *obj, unsigned l return -EFAULT; num_fences = data.num_fences; - if (num_fences >= HW_FENCE_ARRAY_SIZE) { + if (num_fences > HW_FENCE_ARRAY_SIZE) { HWFNC_ERR("Number of fences: %d is greater than allowed size: %d\n", num_fences, HW_FENCE_ARRAY_SIZE); return -EINVAL; @@ -559,6 +562,7 @@ static long hw_sync_ioctl_fence_wait(struct hw_sync_obj *obj, unsigned long arg) struct msm_hw_fence_queue_payload payload; struct hw_fence_sync_wait_data data; struct dma_fence *fence; + ktime_t cur_ktime, exp_ktime; int fd, ret, read = 1, queue_type = HW_FENCE_RX_QUEUE - 1; /* rx queue index */ if (!_is_valid_client(obj)) @@ -582,9 +586,15 @@ static long hw_sync_ioctl_fence_wait(struct hw_sync_obj *obj, unsigned long arg) return -EINVAL; } - ret = wait_event_timeout(hw_fence_client->wait_queue, - atomic_read(&hw_fence_client->val_signal) > 0, - msecs_to_jiffies(data.timeout_ms)); + exp_ktime = ktime_add_ms(ktime_get(), data.timeout_ms); + do { + ret = wait_event_timeout(hw_fence_client->wait_queue, + atomic_read(&hw_fence_client->val_signal) > 0, + 
msecs_to_jiffies(data.timeout_ms)); + cur_ktime = ktime_get(); + } while ((atomic_read(&hw_fence_client->val_signal) <= 0) && (ret == 0) && + ktime_compare_safe(exp_ktime, cur_ktime) > 0); + if (!ret) { HWFNC_ERR("timed out waiting for the client signal %d\n", data.timeout_ms); /* Decrement the refcount that hw_sync_get_fence increments */ From e59a1e44645e426550c098a68948a79182aea232 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 14 Mar 2023 11:35:19 -0700 Subject: [PATCH 059/166] mm-drivers: hw_fence: avoid compiling synx translation layer on kalama Avoid compiling synx translation layer on kalama target where synx driver is not available. Change-Id: I0a4f8c291fc3843065e75f536b4e16a246ea69d4 Signed-off-by: Grace An --- hw_fence/Kbuild | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/hw_fence/Kbuild b/hw_fence/Kbuild index 55334e8b65..3bcd693da7 100644 --- a/hw_fence/Kbuild +++ b/hw_fence/Kbuild @@ -3,9 +3,7 @@ KDIR := $(TOP)/kernel_platform/msm-kernel include $(MSM_HW_FENCE_ROOT)/config/kalamammdrivers.conf LINUXINCLUDE += -include $(MSM_HW_FENCE_ROOT)/config/kalamammdriversconf.h \ - -I$(MSM_HW_FENCE_ROOT)hw_fence/include/ \ - -I$(MSM_HW_FENCE_ROOT)/../synx-kernel/msm/synx/ \ - -I$(MSM_HW_FENCE_ROOT)/../synx-kernel/include/uapi/synx/media/ + -I$(MSM_HW_FENCE_ROOT)hw_fence/include/ ifdef CONFIG_QTI_HW_FENCE obj-m += msm_hw_fence.o @@ -14,8 +12,13 @@ msm_hw_fence-y := src/msm_hw_fence.o \ src/hw_fence_drv_priv.o \ src/hw_fence_drv_utils.o \ src/hw_fence_drv_debug.o \ - src/hw_fence_drv_ipc.o \ - src/msm_hw_fence_synx_translation.o + src/hw_fence_drv_ipc.o + +ifneq ($(CONFIG_ARCH_KALAMA), y) +LINUXINCLUDE += -I$(MSM_HW_FENCE_ROOT)/../synx-kernel/msm/synx/ \ + -I$(MSM_HW_FENCE_ROOT)/../synx-kernel/include/uapi/synx/media/ +msm_hw_fence-y += src/msm_hw_fence_synx_translation.o +endif msm_hw_fence-$(CONFIG_DEBUG_FS) += src/hw_fence_ioctl.o From 2348b032738c2e0498ee283e419ff42b38e9b773 Mon Sep 17 00:00:00 2001 From: Grace An Date: 
Tue, 13 Dec 2022 11:49:57 -0800 Subject: [PATCH 060/166] mm-drivers: hw_fence: add device-tree configurable queue padding Add device-tree configurable padding in bytes before and after queue header(s). This enables support for 32-byte aligned queue write_idx, which is a requirement to satisfy hardware constraints by some clients. Change-Id: Icfd6bb385c825a8629974c72522efdc3cbfe3303 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 45 +++++++-- hw_fence/include/hw_fence_drv_utils.h | 37 ++------ hw_fence/src/hw_fence_drv_priv.c | 43 +++++---- hw_fence/src/hw_fence_drv_utils.c | 130 ++++++++++++++++++-------- hw_fence/src/msm_hw_fence.c | 20 ++-- 5 files changed, 178 insertions(+), 97 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index d17643c4d6..2e03faba41 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -172,6 +172,7 @@ enum payload_type { * number of sub-clients (e.g. ife clients) * @mem_descriptor: hfi header memory descriptor * @queues: queues descriptor + * @queues_num: number of client queues * @ipc_signal_id: id of the signal to be triggered for this client * @ipc_client_vid: virtual id of the ipc client for this hw fence driver client * @ipc_client_pid: physical id of the ipc client for this hw fence driver client @@ -187,6 +188,7 @@ struct msm_hw_fence_client { enum hw_fence_client_id client_id_ext; struct msm_hw_fence_mem_addr mem_descriptor; struct msm_hw_fence_queue queues[HW_FENCE_CLIENT_QUEUES]; + int queues_num; int ipc_signal_id; int ipc_client_vid; int ipc_client_pid; @@ -239,24 +241,48 @@ struct msm_hw_fence_dbg_data { }; /** - * struct hw_fence_client_queue_size_desc - Structure holding client queue properties for a client. + * struct hw_fence_client_type_desc - Structure holding client type properties, including static + * properties and client queue properties read from device-tree. 
* - * @queues_num: number of client queues - * @queue_entries: number of queue entries per client queue - * @mem_size: size of memory allocated for client queues - * @start_offset: start offset of client queue memory region, from beginning of carved-out memory - * allocation for hw fence driver + * @name: name of client type, used to parse properties from device-tree + * @init_id: initial client_id for given client type within the 'hw_fence_client_id' enum, e.g. + * HW_FENCE_CLIENT_ID_CTL0 for DPU clients + * @max_clients_num: maximum number of clients of given client type + * @clients_num: number of clients of given client type + * @queues_num: number of queues per client of given client type; either one (for only Tx Queue) or + * two (for both Tx and Rx Queues) + * @queue_entries: number of entries per client queue of given client type + * @start_padding: size of padding between queue table header and first queue header in bytes + * @end_padding: size of padding between queue header(s) and first queue payload in bytes + * @mem_size: size of memory allocated for client queue(s) per client in bytes * @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence * driver and hfi_header->tx_wm is updated instead */ -struct hw_fence_client_queue_size_desc { +struct hw_fence_client_type_desc { + char *name; + enum hw_fence_client_id init_id; + u32 max_clients_num; + u32 clients_num; u32 queues_num; u32 queue_entries; + u32 start_padding; + u32 end_padding; u32 mem_size; - u32 start_offset; bool skip_txq_wr_idx; }; +/** + * struct hw_fence_client_queue_desc - Structure holding client queue properties for a client. 
+ * + * @type: pointer to client queue properties of client type + * @start_offset: start offset of client queue memory region, from beginning of carved-out memory + * allocation for hw fence driver + */ +struct hw_fence_client_queue_desc { + struct hw_fence_client_type_desc *type; + u32 start_offset; +}; + /** * struct hw_fence_driver_data - Structure holding internal hw-fence driver data * @@ -268,6 +294,7 @@ struct hw_fence_client_queue_size_desc { * @hw_fence_ctrl_queue_size: size of the ctrl queue for the payload * @hw_fence_mem_ctrl_queues_size: total size of ctrl queues, including: header + rxq + txq * @hw_fence_client_queue_size: descriptors of client queue properties for each hw fence client + * @hw_fence_client_types: descriptors of properties for each hw fence client type * @rxq_clients_num: number of supported hw fence clients with rxq (configured based on device-tree) * @clients_num: number of supported hw fence clients (configured based on device-tree) * @hw_fences_tbl: pointer to the hw-fences table @@ -320,7 +347,7 @@ struct hw_fence_driver_data { u32 hw_fence_ctrl_queue_size; u32 hw_fence_mem_ctrl_queues_size; /* client queues */ - struct hw_fence_client_queue_size_desc *hw_fence_client_queue_size; + struct hw_fence_client_queue_desc *hw_fence_client_queue_size; struct hw_fence_client_type_desc *hw_fence_client_types; u32 rxq_clients_num; u32 clients_num; diff --git a/hw_fence/include/hw_fence_drv_utils.h b/hw_fence/include/hw_fence_drv_utils.h index ae711d8869..454b5b570d 100644 --- a/hw_fence/include/hw_fence_drv_utils.h +++ b/hw_fence/include/hw_fence_drv_utils.h @@ -38,33 +38,6 @@ enum hw_fence_mem_reserve { HW_FENCE_MEM_RESERVE_CLIENT_QUEUE }; -/** - * struct hw_fence_client_type_desc - Structure holding client type properties, including static - * properties and client queue properties read from device-tree. 
- * - * @name: name of client type, used to parse properties from device-tree - * @init_id: initial client_id for given client type within the 'hw_fence_client_id' enum, e.g. - * HW_FENCE_CLIENT_ID_CTL0 for DPU clients - * @max_clients_num: maximum number of clients of given client type - * @clients_num: number of clients of given client type - * @queues_num: number of queues per client of given client type; either one (for only Tx Queue) or - * two (for both Tx and Rx Queues) - * @queue_entries: number of entries per client queue of given client type - * @mem_size: size of memory allocated for client queue(s) per client - * @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence - * driver and hfi_header->tx_wm is updated instead - */ -struct hw_fence_client_type_desc { - char *name; - enum hw_fence_client_id init_id; - u32 max_clients_num; - u32 clients_num; - u32 queues_num; - u32 queue_entries; - u32 mem_size; - bool skip_txq_wr_idx; -}; - /** * global_atomic_store() - Inter-processor lock * @drv_data: hw fence driver data @@ -173,6 +146,16 @@ int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver_data *drv_data, enum hw_fence_client_id client_id); +/** + * hw_fence_utils_get_queues_num() - Returns number of client queues for the client_id. + * + * @drv_data: driver data + * @client_id: hw fence driver client id + * + * Returns: number of client queues + */ +int hw_fence_utils_get_queues_num(struct hw_fence_driver_data *drv_data, int client_id); + /** * hw_fence_utils_skips_txq_wr_index() - Returns bool to indicate if client Tx Queue write_index * is not updated in hw fence driver. 
Instead, diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 833bc1b077..ea931f1510 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -32,10 +32,12 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, { struct msm_hw_fence_hfi_queue_table_header *hfi_table_header; struct msm_hw_fence_hfi_queue_header *hfi_queue_header; + struct hw_fence_client_type_desc *desc; void *ptr, *qptr; phys_addr_t phys, qphys; u32 size, start_queue_offset; int headers_size, queue_size, payload_size; + int start_padding = 0, end_padding = 0; int i, ret = 0; HWFNC_DBG_INIT("mem_reserve_id:%d client_id:%d\n", mem_reserve_id, client_id); @@ -46,14 +48,19 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, payload_size = HW_FENCE_CTRL_QUEUE_PAYLOAD; break; case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: - if (client_id >= drv_data->clients_num) { - HWFNC_ERR("Invalid client_id: %d\n", client_id); + if (client_id >= drv_data->clients_num || + !drv_data->hw_fence_client_queue_size[client_id].type) { + HWFNC_ERR("Invalid client_id:%d for clients_num:%lu\n", client_id, + drv_data->clients_num); return -EINVAL; } - headers_size = HW_FENCE_HFI_CLIENT_HEADERS_SIZE(queues_num); - queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * - drv_data->hw_fence_client_queue_size[client_id].queue_entries; + desc = drv_data->hw_fence_client_queue_size[client_id].type; + start_padding = desc->start_padding; + end_padding = desc->end_padding; + headers_size = HW_FENCE_HFI_CLIENT_HEADERS_SIZE(queues_num) + start_padding + + end_padding; + queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * desc->queue_entries; payload_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD; break; default: @@ -75,16 +82,15 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, mem_descriptor->size = size; /* bytes */ mem_descriptor->mem_data = NULL; /* Currently we don't need any special info */ - HWFNC_DBG_INIT("Initialize headers\n"); + 
HWFNC_DBG_INIT("Initialize headers: headers_size:%d start_padding:%d end_padding:%d\n", + headers_size, start_padding, end_padding); /* Initialize headers info within hfi memory */ hfi_table_header = (struct msm_hw_fence_hfi_queue_table_header *)ptr; hfi_table_header->version = 0; hfi_table_header->size = size; /* bytes */ /* Offset, from the Base Address, where the first queue header starts */ - hfi_table_header->qhdr0_offset = - sizeof(struct msm_hw_fence_hfi_queue_table_header); - hfi_table_header->qhdr_size = - sizeof(struct msm_hw_fence_hfi_queue_header); + hfi_table_header->qhdr0_offset = HW_FENCE_HFI_TABLE_HEADER_SIZE + start_padding; + hfi_table_header->qhdr_size = HW_FENCE_HFI_QUEUE_HEADER_SIZE; hfi_table_header->num_q = queues_num; /* number of queues */ hfi_table_header->num_active_q = queues_num; @@ -96,7 +102,7 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, */ HWFNC_DBG_INIT("Initialize queues\n"); hfi_queue_header = (struct msm_hw_fence_hfi_queue_header *) - ((char *)ptr + HW_FENCE_HFI_TABLE_HEADER_SIZE); + ((char *)ptr + hfi_table_header->qhdr0_offset); for (i = 0; i < queues_num; i++) { HWFNC_DBG_INIT("init queue[%d]\n", i); @@ -251,10 +257,9 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, u32 *wr_ptr; int ret = 0; - if (queue_type >= - drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].queues_num) { - HWFNC_ERR("Invalid queue type:%s client_id:%d\n", queue_type, - hw_fence_client->client_id); + if (queue_type >= hw_fence_client->queues_num) { + HWFNC_ERR("Invalid queue type:%d client_id:%d q_num:%lu\n", queue_type, + hw_fence_client->client_id, hw_fence_client->queues_num); return -EINVAL; } @@ -539,10 +544,16 @@ int hw_fence_alloc_client_resources(struct hw_fence_driver_data *drv_data, { int ret; + if (!drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].type) { + HWFNC_ERR("invalid client_id:%d not reserved client queue; check dt props\n", + hw_fence_client->client_id); + 
return -EINVAL; + } + /* Init client queues */ ret = init_hw_fences_queues(drv_data, HW_FENCE_MEM_RESERVE_CLIENT_QUEUE, &hw_fence_client->mem_descriptor, hw_fence_client->queues, - drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].queues_num, + drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].type->queues_num, hw_fence_client->client_id); if (ret) { HWFNC_ERR("Failure to init the queue for client:%d\n", diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 9293e52cb3..e2dc4b04ca 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -77,23 +77,23 @@ */ struct hw_fence_client_type_desc hw_fence_client_types[HW_FENCE_MAX_CLIENT_TYPE] = { {"gpu", HW_FENCE_CLIENT_ID_CTX0, HW_FENCE_CLIENT_TYPE_MAX_GPU, HW_FENCE_CLIENT_TYPE_MAX_GPU, - HW_FENCE_CLIENT_QUEUES, 0, 0, false}, + HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false}, {"dpu", HW_FENCE_CLIENT_ID_CTL0, HW_FENCE_CLIENT_TYPE_MAX_DPU, HW_FENCE_CLIENT_TYPE_MAX_DPU, - HW_FENCE_CLIENT_QUEUES, 0, 0, false}, + HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false}, {"val", HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_TYPE_MAX_VAL, HW_FENCE_CLIENT_TYPE_MAX_VAL, - HW_FENCE_CLIENT_QUEUES, 0, 0, false}, + HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false}, {"ipe", HW_FENCE_CLIENT_ID_IPE, HW_FENCE_CLIENT_TYPE_MAX_IPE, 0, - HW_FENCE_CLIENT_QUEUES, 0, 0, false}, + HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false}, {"vpu", HW_FENCE_CLIENT_ID_VPU, HW_FENCE_CLIENT_TYPE_MAX_VPU, 0, - HW_FENCE_CLIENT_QUEUES, 0, 0, false}, - {"ife0", HW_FENCE_CLIENT_ID_IFE0, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, - {"ife1", HW_FENCE_CLIENT_ID_IFE1, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, - {"ife2", HW_FENCE_CLIENT_ID_IFE2, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, - {"ife3", HW_FENCE_CLIENT_ID_IFE3, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, - {"ife4", HW_FENCE_CLIENT_ID_IFE4, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, - {"ife5", HW_FENCE_CLIENT_ID_IFE5, 
HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, - {"ife6", HW_FENCE_CLIENT_ID_IFE6, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, - {"ife7", HW_FENCE_CLIENT_ID_IFE7, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, + HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false}, + {"ife0", HW_FENCE_CLIENT_ID_IFE0, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, + {"ife1", HW_FENCE_CLIENT_ID_IFE1, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, + {"ife2", HW_FENCE_CLIENT_ID_IFE2, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, + {"ife3", HW_FENCE_CLIENT_ID_IFE3, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, + {"ife4", HW_FENCE_CLIENT_ID_IFE4, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, + {"ife5", HW_FENCE_CLIENT_ID_IFE5, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, + {"ife6", HW_FENCE_CLIENT_ID_IFE6, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, + {"ife7", HW_FENCE_CLIENT_ID_IFE7, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, }; static void _lock(uint64_t *wait) @@ -549,23 +549,16 @@ int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data, *size = drv_data->hw_fence_mem_fences_table_size; break; case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: - if (client_id >= drv_data->clients_num) { - HWFNC_ERR("unexpected client_id:%d\n", client_id); + if (client_id >= drv_data->clients_num || + !drv_data->hw_fence_client_queue_size[client_id].type) { + HWFNC_ERR("unexpected client_id:%d for clients_num:%lu\n", client_id, + drv_data->clients_num); ret = -EINVAL; goto exit; } start_offset = drv_data->hw_fence_client_queue_size[client_id].start_offset; - *size = drv_data->hw_fence_client_queue_size[client_id].mem_size; - - /* - * If this error occurs when client should be valid, check that support for this - * client has been configured in device-tree properties. 
- */ - if (!*size) { - HWFNC_ERR("invalid client_id:%d not reserved client queue\n", client_id); - ret = -EINVAL; - } + *size = drv_data->hw_fence_client_queue_size[client_id].type->mem_size; break; default: HWFNC_ERR("Invalid mem reserve type:%d\n", type); @@ -592,6 +585,49 @@ exit: return ret; } +static int _parse_client_queue_dt_props_extra(struct hw_fence_driver_data *drv_data, + struct hw_fence_client_type_desc *desc) +{ + char name[40]; + u32 tmp[2]; + int ret; + + snprintf(name, sizeof(name), "qcom,hw-fence-client-type-%s-extra", desc->name); + ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, 2); + + /* extra dt props not set */ + if (ret) + return 0; + + desc->start_padding = tmp[0]; + desc->end_padding = tmp[1]; + + if (desc->start_padding % sizeof(u32) || desc->end_padding % sizeof(u32) || + (desc->start_padding + desc->end_padding) % sizeof(u64)) { + HWFNC_ERR("%s start_padding:%lu end_padding:%lu violates mem alignment\n", + desc->name, desc->start_padding, desc->end_padding); + return -EINVAL; + } + + if (desc->start_padding >= U32_MAX - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num)) { + HWFNC_ERR("%s client queues_num:%lu start_padding:%lu will overflow mem_size\n", + desc->name, desc->queues_num, desc->start_padding); + return -EINVAL; + } + + if (desc->end_padding >= U32_MAX - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) - + desc->start_padding) { + HWFNC_ERR("%s client q_num:%lu start_p:%lu end_p:%lu will overflow mem_size\n", + desc->name, desc->queues_num, desc->start_padding, desc->end_padding); + return -EINVAL; + } + + HWFNC_DBG_INIT("%s: start_padding_size=%lu end_padding_size=%lu\n", desc->name, + desc->start_padding, desc->end_padding); + + return 0; +} + static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_data, struct hw_fence_client_type_desc *desc) { @@ -600,7 +636,7 @@ static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_da u32 queue_size; int ret; - /* 
parse client queue property from device-tree */ + /* parse client queue properties from device-tree */ snprintf(name, sizeof(name), "qcom,hw-fence-client-type-%s", desc->name); ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, 4); if (ret) { @@ -626,6 +662,13 @@ static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_da return -EINVAL; } + /* parse extra client queue properties from device-tree */ + ret = _parse_client_queue_dt_props_extra(drv_data, desc); + if (ret) { + HWFNC_ERR("%s failed to parse extra dt props\n", desc->name); + return -EINVAL; + } + /* compute mem_size */ if (desc->queue_entries >= U32_MAX / HW_FENCE_CLIENT_QUEUE_PAYLOAD) { HWFNC_ERR("%s client queue entries:%lu will overflow client queue size\n", @@ -635,17 +678,18 @@ static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_da queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * desc->queue_entries; if (queue_size >= ((U32_MAX & PAGE_MASK) - - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num)) / desc->queues_num) { - HWFNC_ERR("%s client queue size:%lu will overflow client queue mem size\n", - desc->name, queue_size); + (HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) + + desc->start_padding + desc->end_padding)) / desc->queues_num) { + HWFNC_ERR("%s client queue_sz:%lu start_p:%lu end_p:%lu will overflow mem size\n", + desc->name, queue_size, desc->start_padding, desc->end_padding); return -EINVAL; } desc->mem_size = PAGE_ALIGN(HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) + - (queue_size * desc->queues_num)); + (queue_size * desc->queues_num) + desc->start_padding + desc->end_padding); if (desc->mem_size > MAX_CLIENT_QUEUE_MEM_SIZE) { - HWFNC_ERR("%s client queue mem_size:%lu greater than max client queue size:%lu\n", + HWFNC_ERR("%s client queue mem_size:%lu greater than max mem size:%lu\n", desc->name, desc->mem_size, MAX_CLIENT_QUEUE_MEM_SIZE); return -EINVAL; } @@ -690,7 +734,7 @@ static int 
_parse_client_queue_dt_props(struct hw_fence_driver_data *drv_data) drv_data->clients_num = HW_FENCE_MAX_STATIC_CLIENTS_INDEX + configurable_clients_num; /* allocate memory for client queue size descriptors */ - size = drv_data->clients_num * sizeof(struct hw_fence_client_queue_size_desc); + size = drv_data->clients_num * sizeof(struct hw_fence_client_queue_desc); drv_data->hw_fence_client_queue_size = kzalloc(size, GFP_KERNEL); if (!drv_data->hw_fence_client_queue_size) return -ENOMEM; @@ -707,9 +751,7 @@ static int _parse_client_queue_dt_props(struct hw_fence_driver_data *drv_data) hw_fence_utils_get_client_id_priv(drv_data, client_id_ext); drv_data->hw_fence_client_queue_size[client_id] = - (struct hw_fence_client_queue_size_desc) - {desc->queues_num, desc->queue_entries, desc->mem_size, - start_offset, desc->skip_txq_wr_idx}; + (struct hw_fence_client_queue_desc){desc, start_offset}; HWFNC_DBG_INIT("%s client_id_ext:%lu client_id:%lu start_offset:%lu\n", desc->name, client_id_ext, client_id, start_offset); start_offset += desc->mem_size; @@ -929,10 +971,24 @@ enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver return client_id_priv; } +int hw_fence_utils_get_queues_num(struct hw_fence_driver_data *drv_data, int client_id) +{ + if (!drv_data || client_id >= drv_data->clients_num || + !drv_data->hw_fence_client_queue_size[client_id].type) { + HWFNC_ERR("invalid access to client:%d queues_num\n", client_id); + return 0; + } + + return drv_data->hw_fence_client_queue_size[client_id].type->queues_num; +} + bool hw_fence_utils_skips_txq_wr_idx(struct hw_fence_driver_data *drv_data, int client_id) { - if (!drv_data || client_id >= drv_data->clients_num) + if (!drv_data || client_id >= drv_data->clients_num || + !drv_data->hw_fence_client_queue_size[client_id].type) { + HWFNC_ERR("invalid access to client:%d skips_txq_wr_idx\n", client_id); return false; + } - return drv_data->hw_fence_client_queue_size[client_id].skip_txq_wr_idx; + return 
drv_data->hw_fence_client_queue_size[client_id].type->skip_txq_wr_idx; } diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 517a991741..d243b06543 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -87,15 +87,18 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, } hw_fence_client->update_rxq = hw_fence_ipcc_needs_rxq_update(hw_fence_drv_data, client_id); - if (hw_fence_client->update_rxq && - hw_fence_drv_data->hw_fence_client_queue_size[client_id].queues_num < - HW_FENCE_CLIENT_QUEUES) { - HWFNC_ERR("Cannot update rx queue for tx queue-only client:%d\n", client_id); + hw_fence_client->send_ipc = hw_fence_ipcc_needs_ipc_irq(hw_fence_drv_data, client_id); + + hw_fence_client->queues_num = hw_fence_utils_get_queues_num(hw_fence_drv_data, client_id); + if (!hw_fence_client->queues_num || (hw_fence_client->update_rxq && + hw_fence_client->queues_num < HW_FENCE_CLIENT_QUEUES)) { + HWFNC_ERR("client:%d invalid q_num:%lu for updates_rxq:%s\n", client_id, + hw_fence_client->queues_num, + hw_fence_client->update_rxq ? 
"true" : "false"); ret = -EINVAL; goto error; } - hw_fence_client->send_ipc = hw_fence_ipcc_needs_ipc_irq(hw_fence_drv_data, client_id); hw_fence_client->skip_txq_wr_idx = hw_fence_utils_skips_txq_wr_idx(hw_fence_drv_data, client_id); @@ -118,9 +121,10 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, if (ret) goto error; - HWFNC_DBG_INIT("-- Initialized ptr:0x%p client_id:%d ipc_signal_id:%d ipc vid:%d pid:%d\n", - hw_fence_client, hw_fence_client->client_id, hw_fence_client->ipc_signal_id, - hw_fence_client->ipc_client_vid, hw_fence_client->ipc_client_pid); + HWFNC_DBG_INIT("Initialized ptr:0x%p client_id:%d q_num:%d ipc signal:%d vid:%d pid:%d\n", + hw_fence_client, hw_fence_client->client_id, hw_fence_client->queues_num, + hw_fence_client->ipc_signal_id, hw_fence_client->ipc_client_vid, + hw_fence_client->ipc_client_pid); #if IS_ENABLED(CONFIG_DEBUG_FS) init_waitqueue_head(&hw_fence_client->wait_queue); From 414d3b480d09eb243261b2a1c4ba49da5957d436 Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 11 Jan 2023 16:58:22 -0800 Subject: [PATCH 061/166] mm-drivers: hw_fence: add support for client queue alternate indexing Some clients require that write_index starts from nonzero value and index by payload instead of by dwords. Add support for device-tree configurable properties to control nonzero index start_index and indexing by payload for client tx queue read and write indices. 
Change-Id: I8942dc2d25a7d1cb0421cabd36c73a404ecd0134 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 14 +++- hw_fence/include/hw_fence_drv_utils.h | 13 --- hw_fence/src/hw_fence_drv_priv.c | 79 ++++++++++++++++-- hw_fence/src/hw_fence_drv_utils.c | 116 +++++++++++++++++--------- hw_fence/src/msm_hw_fence.c | 5 +- 5 files changed, 163 insertions(+), 64 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 2e03faba41..f1786831fb 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -150,12 +150,19 @@ enum hw_fence_client_data_id { * @q_size_bytes: size of the queue * @va_header: pointer to the hfi header virtual address * @pa_queue: physical address of the queue + * @rd_wr_idx_start: start read and write indexes for client queue (zero by default) + * @rd_wr_idx_factor: factor to multiply custom index to get index in dwords (one by default) + * @skip_wr_idx: bool to indicate if update to write_index is skipped within hw fence driver and + * hfi_header->tx_wm is updated instead */ struct msm_hw_fence_queue { void *va_queue; u32 q_size_bytes; void *va_header; phys_addr_t pa_queue; + u32 rd_wr_idx_start; + u32 rd_wr_idx_factor; + bool skip_wr_idx; }; /** @@ -178,8 +185,6 @@ enum payload_type { * @ipc_client_pid: physical id of the ipc client for this hw fence driver client * @update_rxq: bool to indicate if client uses rx-queue * @send_ipc: bool to indicate if client requires ipc interrupt for already signaled fences - * @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence - * driver and hfi_header->tx_wm is updated instead * @wait_queue: wait queue for the validation clients * @val_signal: doorbell flag to signal the validation clients in the wait queue */ @@ -194,7 +199,6 @@ struct msm_hw_fence_client { int ipc_client_pid; bool update_rxq; bool send_ipc; - bool skip_txq_wr_idx; #if IS_ENABLED(CONFIG_DEBUG_FS) 
wait_queue_head_t wait_queue; atomic_t val_signal; @@ -255,6 +259,8 @@ struct msm_hw_fence_dbg_data { * @start_padding: size of padding between queue table header and first queue header in bytes * @end_padding: size of padding between queue header(s) and first queue payload in bytes * @mem_size: size of memory allocated for client queue(s) per client in bytes + * @txq_idx_start: start read and write indexes for client tx queue (zero by default) + * @txq_idx_factor: factor to multiply custom TxQ idx to get index in dwords (one by default) * @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence * driver and hfi_header->tx_wm is updated instead */ @@ -268,6 +274,8 @@ struct hw_fence_client_type_desc { u32 start_padding; u32 end_padding; u32 mem_size; + u32 txq_idx_start; + u32 txq_idx_factor; bool skip_txq_wr_idx; }; diff --git a/hw_fence/include/hw_fence_drv_utils.h b/hw_fence/include/hw_fence_drv_utils.h index 454b5b570d..6b35962f41 100644 --- a/hw_fence/include/hw_fence_drv_utils.h +++ b/hw_fence/include/hw_fence_drv_utils.h @@ -156,17 +156,4 @@ enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver */ int hw_fence_utils_get_queues_num(struct hw_fence_driver_data *drv_data, int client_id); -/** - * hw_fence_utils_skips_txq_wr_index() - Returns bool to indicate if client Tx Queue write_index - * is not updated in hw fence driver. Instead, - * hfi_header->tx_wm tracks where payload is written within - * the queue. 
- * - * @drv_data: driver data - * @client_id: hw fence driver client id - * - * Returns: true if hw fence driver skips update to client tx queue write_index, false otherwise - */ -bool hw_fence_utils_skips_txq_wr_idx(struct hw_fence_driver_data *drv_data, int client_id); - #endif /* __HW_FENCE_DRV_UTILS_H */ diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index ea931f1510..f47abca728 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -15,6 +15,8 @@ /* Global atomic lock */ #define GLOBAL_ATOMIC_STORE(drv_data, lock, val) global_atomic_store(drv_data, lock, val) +#define IS_HW_FENCE_TX_QUEUE(queue_type) ((queue_type) == HW_FENCE_TX_QUEUE - 1) + inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data) { #ifdef HWFENCE_USE_SLEEP_TIMER @@ -35,10 +37,11 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, struct hw_fence_client_type_desc *desc; void *ptr, *qptr; phys_addr_t phys, qphys; - u32 size, start_queue_offset; + u32 size, start_queue_offset, txq_idx_start = 0, txq_idx_factor = 1; int headers_size, queue_size, payload_size; int start_padding = 0, end_padding = 0; int i, ret = 0; + bool skip_txq_wr_idx = false; HWFNC_DBG_INIT("mem_reserve_id:%d client_id:%d\n", mem_reserve_id, client_id); switch (mem_reserve_id) { @@ -62,6 +65,9 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, end_padding; queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * desc->queue_entries; payload_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD; + txq_idx_start = desc->txq_idx_start; + txq_idx_factor = desc->txq_idx_factor ? desc->txq_idx_factor : 1; + skip_txq_wr_idx = desc->skip_txq_wr_idx; break; default: HWFNC_ERR("Unexpected mem reserve id: %d\n", mem_reserve_id); @@ -115,7 +121,8 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, hfi_queue_header->start_addr = qphys; /* Set the queue type (i.e. RX or TX queue) */ - hfi_queue_header->type = (i == 0) ? 
HW_FENCE_TX_QUEUE : HW_FENCE_RX_QUEUE; + hfi_queue_header->type = IS_HW_FENCE_TX_QUEUE(i) ? HW_FENCE_TX_QUEUE : + HW_FENCE_RX_QUEUE; /* Set the size of this header */ hfi_queue_header->queue_size = queue_size; @@ -123,6 +130,20 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, /* Set the payload size */ hfi_queue_header->pkt_size = payload_size; + /* Set write index for clients' tx queues that index from nonzero value */ + if (txq_idx_start && IS_HW_FENCE_TX_QUEUE(i) && !hfi_queue_header->write_index) { + if (skip_txq_wr_idx) + hfi_queue_header->tx_wm = txq_idx_start; + hfi_queue_header->read_index = txq_idx_start; + hfi_queue_header->write_index = txq_idx_start; + HWFNC_DBG_INIT("init:TX_QUEUE client:%d rd_idx=%s=%lu\n", client_id, + skip_txq_wr_idx ? "wr_idx=tx_wm" : "wr_idx", + txq_idx_start); + } + + /* Update memory for hfi_queue_header */ + wmb(); + /* Store Memory info in the Client data */ queues[i].va_queue = qptr; queues[i].pa_queue = qphys; @@ -133,6 +154,18 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, client_id, i, queues[i].va_queue, queues[i].pa_queue, queues[i].va_header, queues[i].q_size_bytes, payload_size); + /* Store additional tx queue rd_wr_idx properties */ + if (IS_HW_FENCE_TX_QUEUE(i)) { + queues[i].rd_wr_idx_start = txq_idx_start; + queues[i].rd_wr_idx_factor = txq_idx_factor; + queues[i].skip_wr_idx = skip_txq_wr_idx; + } else { + queues[i].rd_wr_idx_factor = 1; + } + HWFNC_DBG_INIT("rd_wr_idx_start:%lu rd_wr_idx_factor:%lu skip_wr_idx:%s\n", + queues[i].rd_wr_idx_start, queues[i].rd_wr_idx_factor, + queues[i].skip_wr_idx ? 
"true" : "false"); + /* Next header */ hfi_queue_header++; } @@ -189,6 +222,14 @@ int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, read_idx = readl_relaxed(&hfi_header->read_index); write_idx = readl_relaxed(&hfi_header->write_index); + /* translate read and write indexes from custom indexing to dwords with no offset */ + if (queue->rd_wr_idx_start || queue->rd_wr_idx_factor != 1) { + read_idx = (read_idx - queue->rd_wr_idx_start) * queue->rd_wr_idx_factor; + write_idx = (write_idx - queue->rd_wr_idx_start) * queue->rd_wr_idx_factor; + HWFNC_DBG_Q("rd_idx_u32:%lu wr_idx_u32:%lu rd_wr_idx start:%lu factor:%lu\n", + read_idx, write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor); + } + HWFNC_DBG_Q("read client:%d rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d queue:0x%pK\n", hw_fence_client->client_id, &hfi_header->read_index, &hfi_header->write_index, read_idx, write_idx, queue); @@ -215,6 +256,13 @@ int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, if (to_read_idx >= q_size_u32) to_read_idx = 0; + /* translate to_read_idx to custom indexing with offset */ + if (queue->rd_wr_idx_start || queue->rd_wr_idx_factor != 1) { + to_read_idx = (to_read_idx / queue->rd_wr_idx_factor) + queue->rd_wr_idx_start; + HWFNC_DBG_Q("translated to_read_idx:%lu rd_wr_idx start:%lu factor:%lu\n", + to_read_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor); + } + /* Read the Client Queue */ payload->ctxt_id = readq_relaxed(&read_ptr_payload->ctxt_id); payload->seqno = readq_relaxed(&read_ptr_payload->seqno); @@ -275,8 +323,8 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, return -EINVAL; } - /* if skipping update txq wr_index, then use hfi_header->tx_wm instead */ - if (queue_type == (HW_FENCE_TX_QUEUE - 1) && hw_fence_client->skip_txq_wr_idx) + /* if skipping update wr_index, then use hfi_header->tx_wm instead */ + if (queue->skip_wr_idx) wr_ptr = &hfi_header->tx_wm; else wr_ptr = &hfi_header->write_index; @@ -310,8 
+358,15 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, HWFNC_DBG_Q("wr client:%d r_ptr:0x%pK w_ptr:0x%pK r_idx:%d w_idx:%d q:0x%pK type:%d s:%s\n", hw_fence_client->client_id, &hfi_header->read_index, wr_ptr, - read_idx, write_idx, queue, queue_type, - hw_fence_client->skip_txq_wr_idx ? "true" : "false"); + read_idx, write_idx, queue, queue_type, queue->skip_wr_idx ? "true" : "false"); + + /* translate read and write indexes from custom indexing to dwords with no offset */ + if (queue->rd_wr_idx_start || queue->rd_wr_idx_factor != 1) { + read_idx = (read_idx - queue->rd_wr_idx_start) * queue->rd_wr_idx_factor; + write_idx = (write_idx - queue->rd_wr_idx_start) * queue->rd_wr_idx_factor; + HWFNC_DBG_Q("rd_idx_u32:%lu wr_idx_u32:%lu rd_wr_idx start:%lu factor:%lu\n", + read_idx, write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor); + } /* Check queue to make sure message will fit */ q_free_u32 = read_idx <= write_idx ? (q_size_u32 - (write_idx - read_idx)) : @@ -346,6 +401,13 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, if (to_write_idx >= q_size_u32) to_write_idx = 0; + /* translate to_write_idx to custom indexing with offset */ + if (queue->rd_wr_idx_start || queue->rd_wr_idx_factor != 1) { + to_write_idx = (to_write_idx / queue->rd_wr_idx_factor) + queue->rd_wr_idx_start; + HWFNC_DBG_Q("translated to_write_idx:%lu rd_wr_idx start:%lu factor:%lu\n", + to_write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor); + } + /* Update Client Queue */ writeq_relaxed(payload_size, &write_ptr_payload->size); writew_relaxed(HW_FENCE_PAYLOAD_TYPE_1, &write_ptr_payload->type); @@ -1462,8 +1524,12 @@ void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data, /* For the client TxQ: set the read-index same as last write that was done by the client */ mb(); /* make sure data is ready before read */ wr_idx = readl_relaxed(&hfi_header->write_index); + if (queue->skip_wr_idx) + hfi_header->tx_wm = wr_idx; 
writel_relaxed(wr_idx, &hfi_header->read_index); wmb(); /* make sure data is updated after write the index*/ + HWFNC_DBG_Q("update tx queue %s to match write_index:%lu\n", + queue->skip_wr_idx ? "read_index=tx_wm" : "read_index", wr_idx); /* For the client RxQ: set the write-index same as last read done by the client */ if (hw_fence_client->update_rxq) { @@ -1489,6 +1555,7 @@ void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data, /* unlock */ GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 0); + HWFNC_DBG_Q("update rx queue write_index to match read_index:%lu\n", rd_idx); } } diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index e2dc4b04ca..a42329ecb7 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -77,23 +77,31 @@ */ struct hw_fence_client_type_desc hw_fence_client_types[HW_FENCE_MAX_CLIENT_TYPE] = { {"gpu", HW_FENCE_CLIENT_ID_CTX0, HW_FENCE_CLIENT_TYPE_MAX_GPU, HW_FENCE_CLIENT_TYPE_MAX_GPU, - HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false}, + HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false}, {"dpu", HW_FENCE_CLIENT_ID_CTL0, HW_FENCE_CLIENT_TYPE_MAX_DPU, HW_FENCE_CLIENT_TYPE_MAX_DPU, - HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false}, + HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false}, {"val", HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_TYPE_MAX_VAL, HW_FENCE_CLIENT_TYPE_MAX_VAL, - HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false}, - {"ipe", HW_FENCE_CLIENT_ID_IPE, HW_FENCE_CLIENT_TYPE_MAX_IPE, 0, - HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false}, - {"vpu", HW_FENCE_CLIENT_ID_VPU, HW_FENCE_CLIENT_TYPE_MAX_VPU, 0, - HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false}, - {"ife0", HW_FENCE_CLIENT_ID_IFE0, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, - {"ife1", HW_FENCE_CLIENT_ID_IFE1, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, - {"ife2", HW_FENCE_CLIENT_ID_IFE2, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, - {"ife3", HW_FENCE_CLIENT_ID_IFE3, 
HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, - {"ife4", HW_FENCE_CLIENT_ID_IFE4, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, - {"ife5", HW_FENCE_CLIENT_ID_IFE5, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, - {"ife6", HW_FENCE_CLIENT_ID_IFE6, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, - {"ife7", HW_FENCE_CLIENT_ID_IFE7, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, + HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false}, + {"ipe", HW_FENCE_CLIENT_ID_IPE, HW_FENCE_CLIENT_TYPE_MAX_IPE, 0, HW_FENCE_CLIENT_QUEUES, + 0, 0, 0, 0, 0, 0, false}, + {"vpu", HW_FENCE_CLIENT_ID_VPU, HW_FENCE_CLIENT_TYPE_MAX_VPU, 0, HW_FENCE_CLIENT_QUEUES, + 0, 0, 0, 0, 0, 0, false}, + {"ife0", HW_FENCE_CLIENT_ID_IFE0, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true}, + {"ife1", HW_FENCE_CLIENT_ID_IFE1, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true}, + {"ife2", HW_FENCE_CLIENT_ID_IFE2, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true}, + {"ife3", HW_FENCE_CLIENT_ID_IFE3, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true}, + {"ife4", HW_FENCE_CLIENT_ID_IFE4, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true}, + {"ife5", HW_FENCE_CLIENT_ID_IFE5, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true}, + {"ife6", HW_FENCE_CLIENT_ID_IFE6, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true}, + {"ife7", HW_FENCE_CLIENT_ID_IFE7, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true}, }; static void _lock(uint64_t *wait) @@ -588,44 +596,87 @@ exit: static int _parse_client_queue_dt_props_extra(struct hw_fence_driver_data *drv_data, struct hw_fence_client_type_desc *desc) { + u32 max_idx_from_zero, payload_size_u32 = HW_FENCE_CLIENT_QUEUE_PAYLOAD / sizeof(u32); char name[40]; - u32 tmp[2]; - int ret; + u32 tmp[4]; + bool idx_by_payload = false; + int count, ret; snprintf(name, sizeof(name), "qcom,hw-fence-client-type-%s-extra", desc->name); - ret = 
of_property_read_u32_array(drv_data->dev->of_node, name, tmp, 2); - /* extra dt props not set */ - if (ret) + /* check if property is present */ + ret = of_property_read_bool(drv_data->dev->of_node, name); + if (!ret) return 0; + count = of_property_count_u32_elems(drv_data->dev->of_node, name); + if (count <= 0 || count > 4) { + HWFNC_ERR("invalid %s extra dt props count:%d\n", desc->name, count); + return -EINVAL; + } + + ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, count); + if (ret) { + HWFNC_ERR("Failed to read %s extra dt properties ret=%d count=%d\n", desc->name, + ret, count); + ret = -EINVAL; + goto exit; + } + desc->start_padding = tmp[0]; - desc->end_padding = tmp[1]; + if (count >= 2) + desc->end_padding = tmp[1]; + if (count >= 3) + desc->txq_idx_start = tmp[2]; + if (count >= 4) { + if (tmp[3] > 1) { + HWFNC_ERR("%s invalid txq_idx_by_payload prop:%lu\n", desc->name, tmp[3]); + ret = -EINVAL; + goto exit; + } + idx_by_payload = tmp[3]; + desc->txq_idx_factor = idx_by_payload ? 
payload_size_u32 : 1; + } if (desc->start_padding % sizeof(u32) || desc->end_padding % sizeof(u32) || (desc->start_padding + desc->end_padding) % sizeof(u64)) { HWFNC_ERR("%s start_padding:%lu end_padding:%lu violates mem alignment\n", desc->name, desc->start_padding, desc->end_padding); - return -EINVAL; + ret = -EINVAL; + goto exit; } if (desc->start_padding >= U32_MAX - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num)) { HWFNC_ERR("%s client queues_num:%lu start_padding:%lu will overflow mem_size\n", desc->name, desc->queues_num, desc->start_padding); - return -EINVAL; + ret = -EINVAL; + goto exit; } if (desc->end_padding >= U32_MAX - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) - desc->start_padding) { HWFNC_ERR("%s client q_num:%lu start_p:%lu end_p:%lu will overflow mem_size\n", desc->name, desc->queues_num, desc->start_padding, desc->end_padding); - return -EINVAL; + ret = -EINVAL; + goto exit; } - HWFNC_DBG_INIT("%s: start_padding_size=%lu end_padding_size=%lu\n", desc->name, - desc->start_padding, desc->end_padding); + max_idx_from_zero = idx_by_payload ? desc->queue_entries : + desc->queue_entries * payload_size_u32; + if (desc->txq_idx_start >= U32_MAX - max_idx_from_zero) { + HWFNC_ERR("%s txq_idx start:%lu by_payload:%s q_entries:%d will overflow txq_idx\n", + desc->name, desc->txq_idx_start, idx_by_payload ? "true" : "false", + desc->queue_entries); + ret = -EINVAL; + goto exit; + } - return 0; + HWFNC_DBG_INIT("%s: start_p=%lu end_p=%lu txq_idx_start:%lu txq_idx_by_payload:%s\n", + desc->name, desc->start_padding, desc->end_padding, desc->txq_idx_start, + idx_by_payload ? 
"true" : "false"); + +exit: + return ret; } static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_data, @@ -981,14 +1032,3 @@ int hw_fence_utils_get_queues_num(struct hw_fence_driver_data *drv_data, int cli return drv_data->hw_fence_client_queue_size[client_id].type->queues_num; } - -bool hw_fence_utils_skips_txq_wr_idx(struct hw_fence_driver_data *drv_data, int client_id) -{ - if (!drv_data || client_id >= drv_data->clients_num || - !drv_data->hw_fence_client_queue_size[client_id].type) { - HWFNC_ERR("invalid access to client:%d skips_txq_wr_idx\n", client_id); - return false; - } - - return drv_data->hw_fence_client_queue_size[client_id].type->skip_txq_wr_idx; -} diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index d243b06543..82ee33bdaa 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -99,16 +99,13 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, goto error; } - hw_fence_client->skip_txq_wr_idx = hw_fence_utils_skips_txq_wr_idx(hw_fence_drv_data, - client_id); - /* Alloc Client HFI Headers and Queues */ ret = hw_fence_alloc_client_resources(hw_fence_drv_data, hw_fence_client, mem_descriptor); if (ret) goto error; - /* Initialize signal for communication withe FenceCTL */ + /* Initialize signal for communication with FenceCTL */ ret = hw_fence_init_controller_signal(hw_fence_drv_data, hw_fence_client); if (ret) goto error; From b39fb22f2dfc0586d296d2604a3b258ca1a76ce7 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Tue, 17 Jan 2023 14:14:27 -0800 Subject: [PATCH 062/166] mm-drivers: hw_fence: avoid mem share from rm callback Avoid from hw-fence driver call to share carved-out memory between hlos and cpusys vm if the memory has been already shared by the gh_cpusys_vm_mem_access kernel driver. 
Change-Id: I0df0216a6153a8982936885f53bebf7fe83db7e9 Signed-off-by: Ingrid Gallardo --- hw_fence/src/hw_fence_drv_utils.c | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index a42329ecb7..f39234c361 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include "hw_fence_drv_priv.h" @@ -404,8 +405,10 @@ static int hw_fence_rm_cb(struct notifier_block *nb, unsigned long cmd, void *da { struct gh_rm_notif_vm_status_payload *vm_status_payload; struct hw_fence_driver_data *drv_data; + struct resource res; gh_vmid_t peer_vmid; gh_vmid_t self_vmid; + int ret; drv_data = container_of(nb, struct hw_fence_driver_data, rm_nb); @@ -430,11 +433,25 @@ static int hw_fence_rm_cb(struct notifier_block *nb, unsigned long cmd, void *da switch (vm_status_payload->vm_status) { case GH_RM_VM_STATUS_READY: - HWFNC_DBG_INIT("init mem\n"); - if (hw_fence_gunyah_share_mem(drv_data, self_vmid, peer_vmid)) - HWFNC_ERR("failed to share memory\n"); - else - drv_data->vm_ready = true; + ret = gh_cpusys_vm_get_share_mem_info(&res); + if (ret) { + HWFNC_DBG_INIT("mem not shared ret:%d, attempt share\n", ret); + if (hw_fence_gunyah_share_mem(drv_data, self_vmid, peer_vmid)) + HWFNC_ERR("failed to share memory\n"); + else + drv_data->vm_ready = true; + } else { + if (drv_data->res.start == res.start && + resource_size(&drv_data->res) == resource_size(&res)) { + drv_data->vm_ready = true; + HWFNC_DBG_INIT("mem_ready: add:0x%x size:%d ret:%d\n", res.start, + resource_size(&res), ret); + } else { + HWFNC_ERR("mem-shared mismatch:[0x%x,%d] expected:[0x%x,%d]\n", + res.start, resource_size(&res), drv_data->res.start, + resource_size(&drv_data->res)); + } + } break; case GH_RM_VM_STATUS_RESET: HWFNC_DBG_INIT("reset\n"); From d39f91c9bd64739527d2ffe18c67d81f628c9544 Mon Sep 17 00:00:00 2001 
From: Grace An Date: Thu, 15 Dec 2022 09:41:59 -0800 Subject: [PATCH 063/166] mm-drivers: hw_fence: remove deprecated workarounds for no dpu-ipc signals Remove HW Fence driver deprecated support of platforms without dpu-ipc signaling, e.g. targets prior to kalama. Change-Id: I8491a96040b4c3857a32a9bf6092e53479284a64 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_ipc.h | 8 +- hw_fence/include/hw_fence_drv_priv.h | 50 --------- hw_fence/include/hw_fence_drv_utils.h | 9 -- hw_fence/src/hw_fence_drv_debug.c | 8 +- hw_fence/src/hw_fence_drv_ipc.c | 61 +---------- hw_fence/src/hw_fence_drv_priv.c | 11 -- hw_fence/src/hw_fence_drv_utils.c | 139 ++------------------------ 7 files changed, 16 insertions(+), 270 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_ipc.h b/hw_fence/include/hw_fence_drv_ipc.h index 07b7aa754c..93bafd1e93 100644 --- a/hw_fence/include/hw_fence_drv_ipc.h +++ b/hw_fence/include/hw_fence_drv_ipc.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef __HW_FENCE_DRV_IPC_H @@ -36,14 +36,10 @@ #define HW_FENCE_IPC_CLIENT_ID_IFE6_PID 17 #define HW_FENCE_IPC_CLIENT_ID_IFE7_PID 18 -#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_LAHAINA 2 -#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_WAIPIO 1 #define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_KALAMA 2 #define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_PINEAPPLE 2 #define HW_FENCE_IPC_FENCE_PROTOCOL_ID_PINEAPPLE 4 -#define HW_FENCE_IPCC_HW_REV_100 0x00010000 /* Lahaina */ -#define HW_FENCE_IPCC_HW_REV_110 0x00010100 /* Waipio */ #define HW_FENCE_IPCC_HW_REV_170 0x00010700 /* Kalama */ #define HW_FENCE_IPCC_HW_REV_203 0x00020003 /* Pineapple */ @@ -73,7 +69,6 @@ void hw_fence_ipcc_trigger_signal(struct hw_fence_driver_data *drv_data, */ int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data); -#ifdef HW_DPU_IPCC /** * hw_fence_ipcc_enable_dpu_signaling() - Enable ipcc signaling for dpu client. * @drv_data: driver data. @@ -81,7 +76,6 @@ int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data); * Return: 0 on success or negative errno (-EINVAL) */ int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data); -#endif /* HW_DPU_IPCC */ /** * hw_fence_ipcc_get_client_virt_id() - Returns the ipc client virtual id that corresponds to the diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index f1786831fb..f934017749 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -14,9 +14,6 @@ #include #include -/* Add define only for platforms that support IPCC in dpu-hw */ -#define HW_DPU_IPCC 1 - /* max u64 to indicate invalid fence */ #define HW_FENCE_INVALID_PARENT_FENCE (~0ULL) @@ -82,45 +79,6 @@ enum hw_fence_lookup_ops { HW_FENCE_LOOKUP_OP_FIND_FENCE }; -/** - * enum hw_fence_loopback_id - Enum with the clients having a loopback signal (i.e AP to AP signal). - * HW_FENCE_LOOPBACK_DPU_CTL_0: dpu client 0. Used in platforms with no dpu-ipc. 
- * HW_FENCE_LOOPBACK_DPU_CTL_1: dpu client 1. Used in platforms with no dpu-ipc. - * HW_FENCE_LOOPBACK_DPU_CTL_2: dpu client 2. Used in platforms with no dpu-ipc. - * HW_FENCE_LOOPBACK_DPU_CTL_3: dpu client 3. Used in platforms with no dpu-ipc. - * HW_FENCE_LOOPBACK_DPU_CTL_4: dpu client 4. Used in platforms with no dpu-ipc. - * HW_FENCE_LOOPBACK_DPU_CTL_5: dpu client 5. Used in platforms with no dpu-ipc. - * HW_FENCE_LOOPBACK_DPU_CTX_0: gfx client 0. Used in platforms with no gmu support. - * HW_FENCE_LOOPBACK_VAL_0: debug validation client 0. - * HW_FENCE_LOOPBACK_VAL_1: debug validation client 1. - * HW_FENCE_LOOPBACK_VAL_2: debug validation client 2. - * HW_FENCE_LOOPBACK_VAL_3: debug validation client 3. - * HW_FENCE_LOOPBACK_VAL_4: debug validation client 4. - * HW_FENCE_LOOPBACK_VAL_5: debug validation client 5. - * HW_FENCE_LOOPBACK_VAL_6: debug validation client 6. - */ -enum hw_fence_loopback_id { - HW_FENCE_LOOPBACK_DPU_CTL_0, - HW_FENCE_LOOPBACK_DPU_CTL_1, - HW_FENCE_LOOPBACK_DPU_CTL_2, - HW_FENCE_LOOPBACK_DPU_CTL_3, - HW_FENCE_LOOPBACK_DPU_CTL_4, - HW_FENCE_LOOPBACK_DPU_CTL_5, - HW_FENCE_LOOPBACK_GFX_CTX_0, -#if IS_ENABLED(CONFIG_DEBUG_FS) - HW_FENCE_LOOPBACK_VAL_0 = HW_FENCE_CLIENT_ID_VAL0, - HW_FENCE_LOOPBACK_VAL_1, - HW_FENCE_LOOPBACK_VAL_2, - HW_FENCE_LOOPBACK_VAL_3, - HW_FENCE_LOOPBACK_VAL_4, - HW_FENCE_LOOPBACK_VAL_5, - HW_FENCE_LOOPBACK_VAL_6, -#endif /* CONFIG_DEBUG_FS */ - HW_FENCE_LOOPBACK_MAX, -}; - -#define HW_FENCE_MAX_DPU_LOOPBACK_CLIENTS (HW_FENCE_LOOPBACK_DPU_CTL_5 + 1) - /** * enum hw_fence_client_data_id - Enum with the clients having client_data, an optional * parameter passed from the waiting client and returned @@ -334,8 +292,6 @@ struct hw_fence_client_queue_desc { * @qtime_reg_base: qtimer register base address * @qtime_io_mem: qtimer io mem map * @qtime_size: qtimer io mem map size - * @ctl_start_ptr: pointer to the ctl_start registers of the display hw (platforms with no dpu-ipc) - * @ctl_start_size: size of the ctl_start 
registers of the display hw (platforms with no dpu-ipc) * @client_id_mask: bitmask for tracking registered client_ids * @clients_register_lock: lock to synchronize clients registration and deregistration * @clients: table with the handles of the registered clients; size is equal to clients_num @@ -409,10 +365,6 @@ struct hw_fence_driver_data { void __iomem *qtime_io_mem; uint32_t qtime_size; - /* base address for dpu ctl start regs */ - void *ctl_start_ptr[HW_FENCE_MAX_DPU_LOOPBACK_CLIENTS]; - uint32_t ctl_start_size[HW_FENCE_MAX_DPU_LOOPBACK_CLIENTS]; - /* synchronize client_ids registration and deregistration */ struct mutex clients_register_lock; @@ -420,10 +372,8 @@ struct hw_fence_driver_data { struct msm_hw_fence_client **clients; bool vm_ready; -#ifdef HW_DPU_IPCC /* state variables */ bool ipcc_dpu_initialized; -#endif /* HW_DPU_IPCC */ }; /** diff --git a/hw_fence/include/hw_fence_drv_utils.h b/hw_fence/include/hw_fence_drv_utils.h index 6b35962f41..9063385a23 100644 --- a/hw_fence/include/hw_fence_drv_utils.h +++ b/hw_fence/include/hw_fence_drv_utils.h @@ -106,15 +106,6 @@ int hw_fence_utils_map_ipcc(struct hw_fence_driver_data *drv_data); */ int hw_fence_utils_map_qtime(struct hw_fence_driver_data *drv_data); -/** - * hw_fence_utils_map_ctl_start() - Maps ctl_start registers from dpu hw - * @drv_data: hw fence driver data - * - * Returns zero if success, otherwise returns negative error code. This API is only used - * for simulation purposes in platforms where dpu does not support ipc signal. 
- */ -int hw_fence_utils_map_ctl_start(struct hw_fence_driver_data *drv_data); - /** * hw_fence_utils_cleanup_fence() - Cleanup the hw-fence from a specified client * @drv_data: hw fence driver data diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index c3c409a3ab..46eb3e4abe 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -112,7 +112,6 @@ static ssize_t hw_fence_dbg_ipcc_write(struct file *file, const char __user *use drv_data->ipcc_client_vid); } -#ifdef HW_DPU_IPCC /** * hw_fence_dbg_ipcc_dpu_write() - debugfs write to trigger an ipcc irq to dpu core. * @file: file handler. @@ -137,7 +136,6 @@ static const struct file_operations hw_fence_dbg_ipcc_dpu_fops = { .open = simple_open, .write = hw_fence_dbg_ipcc_dpu_write, }; -#endif /* HW_DPU_IPCC */ static const struct file_operations hw_fence_dbg_ipcc_fops = { .open = simple_open, @@ -897,9 +895,9 @@ int process_validation_client_loopback(struct hw_fence_driver_data *drv_data, { struct msm_hw_fence_client *hw_fence_client; - if (client_id < HW_FENCE_LOOPBACK_VAL_0 || client_id > HW_FENCE_LOOPBACK_VAL_6) { + if (client_id < HW_FENCE_CLIENT_ID_VAL0 || client_id > HW_FENCE_CLIENT_ID_VAL6) { HWFNC_ERR("invalid client_id: %d min: %d max: %d\n", client_id, - HW_FENCE_LOOPBACK_VAL_0, HW_FENCE_LOOPBACK_VAL_6); + HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_ID_VAL6); return -EINVAL; } @@ -982,10 +980,8 @@ int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data) debugfs_create_file("ipc_trigger", 0600, debugfs_root, drv_data, &hw_fence_dbg_ipcc_fops); -#ifdef HW_DPU_IPCC debugfs_create_file("dpu_trigger", 0600, debugfs_root, drv_data, &hw_fence_dbg_ipcc_dpu_fops); -#endif /* HW_DPU_IPCC */ debugfs_create_file("hw_fence_reset_client", 0600, debugfs_root, drv_data, &hw_fence_reset_client_fops); debugfs_create_file("hw_fence_register_clients", 0600, debugfs_root, drv_data, diff --git a/hw_fence/src/hw_fence_drv_ipc.c 
b/hw_fence/src/hw_fence_drv_ipc.c index a2289fc8ee..a9b317e87c 100644 --- a/hw_fence/src/hw_fence_drv_ipc.c +++ b/hw_fence/src/hw_fence_drv_ipc.c @@ -32,37 +32,6 @@ struct hw_fence_client_ipc_map { bool send_ipc; }; -/** - * struct hw_fence_clients_ipc_map_no_dpu - Table makes the 'client to signal' mapping, which - * is used by the hw fence driver to trigger ipc signal when the hw fence is already - * signaled. - * This no_dpu version is for targets that do not support dpu client id - * - * Notes: - * The index of this struct must match the enum hw_fence_client_id. - * To change to a loopback signal instead of GMU, change ctx0 row to use: - * {HW_FENCE_IPC_CLIENT_ID_APPS, 20}. - */ -struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_no_dpu[HW_FENCE_IPC_MAP_MAX] = { - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 1, true, true},/* ctrlq*/ - {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0, false, false},/* ctx0 */ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 14, false, true},/*ctl0*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 15, false, true},/*ctl1*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 16, false, true},/*ctl2*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 17, false, true},/*ctl3*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 18, false, true},/*ctl4*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 19, false, true},/*ctl5*/ -#if IS_ENABLED(CONFIG_DEBUG_FS) - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 21, true, false},/*val0*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 22, true, false},/*val1*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 23, true, false},/*val2*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 24, true, false},/*val3*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, 
HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25, true, false},/*val4*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26, true, false},/*val5*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27, true, false},/*val6*/ -#endif /* CONFIG_DEBUG_FS */ -}; - /** * struct hw_fence_clients_ipc_map - Table makes the 'client to signal' mapping, which is * used by the hw fence driver to trigger ipc signal when hw fence is already @@ -337,20 +306,6 @@ static int _hw_fence_ipcc_hwrev_init(struct hw_fence_driver_data *drv_data, u32 int ret = 0; switch (hwrev) { - case HW_FENCE_IPCC_HW_REV_100: - drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; - drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; - drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_LAHAINA; - drv_data->ipc_clients_table = hw_fence_clients_ipc_map_no_dpu; - HWFNC_DBG_INIT("ipcc protocol_id: Lahaina\n"); - break; - case HW_FENCE_IPCC_HW_REV_110: - drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; - drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; - drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_WAIPIO; - drv_data->ipc_clients_table = hw_fence_clients_ipc_map_no_dpu; - HWFNC_DBG_INIT("ipcc protocol_id: Waipio\n"); - break; case HW_FENCE_IPCC_HW_REV_170: drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; @@ -381,20 +336,10 @@ int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data) HWFNC_DBG_H("enable ipc +\n"); - /** - * Attempt to read the ipc version from dt, if not available, then attempt - * to read from the registers. 
- */ ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-ipc-ver", &val); if (ret || !val) { - /* if no device tree prop, attempt to get the version from the registers*/ - HWFNC_DBG_H("missing hw fences ipc-ver entry or invalid ret:%d val:%d\n", ret, val); - - /* Read IPC Version from Client=0x8 (apps) for protocol=2 (compute_l1) */ - val = readl_relaxed(IPC_PROTOCOLp_CLIENTc_VERSION(drv_data->ipcc_io_mem, - HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_LAHAINA, - HW_FENCE_IPC_CLIENT_ID_APPS_VID)); - HWFNC_DBG_INIT("ipcc version:0x%x\n", val); + HWFNC_ERR("missing hw fences ipc-ver entry or invalid ret:%d val:%d\n", ret, val); + return -EINVAL; } if (_hw_fence_ipcc_hwrev_init(drv_data, val)) { @@ -421,7 +366,6 @@ int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data) return 0; } -#ifdef HW_DPU_IPCC int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data) { struct hw_fence_client_ipc_map *hw_fence_client; @@ -482,4 +426,3 @@ int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data) return 0; } -#endif /* HW_DPU_IPCC */ diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index f47abca728..e59162b1f5 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -567,15 +567,6 @@ int hw_fence_init(struct hw_fence_driver_data *drv_data) goto exit; } - /* Map ctl_start registers */ - ret = hw_fence_utils_map_ctl_start(drv_data); - if (ret) { - /* This is not fatal error, since platfoms with dpu-ipc - * won't use this option - */ - HWFNC_WARN("no ctl_start regs, won't trigger the frame\n"); - } - /* Init debugfs */ ret = hw_fence_debug_debugfs_register(drv_data); if (ret) { @@ -665,7 +656,6 @@ int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, case HW_FENCE_CLIENT_ID_CTL3: case HW_FENCE_CLIENT_ID_CTL4: case HW_FENCE_CLIENT_ID_CTL5: -#ifdef HW_DPU_IPCC /* initialize ipcc signals for dpu clients */ 
HWFNC_DBG_H("init_controller_signal: DPU client_id_ext:%d initialized:%d\n", hw_fence_client->client_id_ext, drv_data->ipcc_dpu_initialized); @@ -675,7 +665,6 @@ int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, /* Init dpu client ipcc signal */ hw_fence_ipcc_enable_dpu_signaling(drv_data); } -#endif /* HW_DPU_IPCC */ break; case HW_FENCE_CLIENT_ID_IPE ... HW_FENCE_CLIENT_ID_IPE + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - 1: diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index a42329ecb7..bca3b959c5 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -52,7 +52,7 @@ /* * Each bit in this mask represents each of the loopback clients supported in - * the enum hw_fence_loopback_id + * the enum hw_fence_client_id */ #define HW_FENCE_LOOPBACK_CLIENTS_MASK 0x7fff @@ -170,89 +170,20 @@ void global_atomic_store(struct hw_fence_driver_data *drv_data, uint64_t *lock, } } -static inline int _process_dpu_client_loopback(struct hw_fence_driver_data *drv_data, - int client_id) -{ - int ctl_id = client_id; /* dpu ctl path id is mapped to client id used for the loopback */ - void *ctl_start_reg; - u32 val; - - if (ctl_id > HW_FENCE_LOOPBACK_DPU_CTL_5) { - HWFNC_ERR("invalid ctl_id:%d\n", ctl_id); - return -EINVAL; - } - - ctl_start_reg = drv_data->ctl_start_ptr[ctl_id]; - if (!ctl_start_reg) { - HWFNC_ERR("ctl_start reg not valid for ctl_id:%d\n", ctl_id); - return -EINVAL; - } - - HWFNC_DBG_H("Processing DPU loopback ctl_id:%d\n", ctl_id); - - val = 0x1; /* ctl_start trigger */ -#ifdef CTL_START_SIM - HWFNC_DBG_IRQ("ctl_id:%d Write: to RegOffset:0x%pK val:0x%x\n", ctl_start_reg, val, ctl_id); - writel_relaxed(val, ctl_start_reg); -#else - HWFNC_DBG_IRQ("ctl_id:%d Write: to RegOffset:0x%pK val:0x%x (COMMENTED)\n", ctl_id, - ctl_start_reg, val); -#endif - - return 0; -} - -static inline int _process_gfx_client_loopback(struct hw_fence_driver_data *drv_data, - int client_id) -{ - int 
queue_type = HW_FENCE_RX_QUEUE - 1; /* rx queue index */ - struct msm_hw_fence_queue_payload payload; - int read = 1; - - HWFNC_DBG_IRQ("Processing GFX loopback client_id:%d\n", client_id); - while (read) { - /* - * 'client_id' is the loopback-client-id, not the hw-fence client_id, - * so use GFX hw-fence client id, to get the client data - */ - read = hw_fence_read_queue(drv_data->clients[HW_FENCE_CLIENT_ID_CTX0], &payload, - queue_type); - if (read < 0) { - HWFNC_ERR("unable to read gfx rxq\n"); - break; - } - HWFNC_DBG_L("GFX loopback rxq read: hash:%llu ctx:%llu seq:%llu f:%llu e:%lu\n", - payload.hash, payload.ctxt_id, payload.seqno, payload.flags, payload.error); - } - - return read; -} - static int _process_doorbell_client(struct hw_fence_driver_data *drv_data, int client_id) { int ret; - HWFNC_DBG_H("Processing loopback client_id:%d\n", client_id); + HWFNC_DBG_H("Processing doorbell client_id:%d\n", client_id); switch (client_id) { - case HW_FENCE_LOOPBACK_DPU_CTL_0: - case HW_FENCE_LOOPBACK_DPU_CTL_1: - case HW_FENCE_LOOPBACK_DPU_CTL_2: - case HW_FENCE_LOOPBACK_DPU_CTL_3: - case HW_FENCE_LOOPBACK_DPU_CTL_4: - case HW_FENCE_LOOPBACK_DPU_CTL_5: - ret = _process_dpu_client_loopback(drv_data, client_id); - break; - case HW_FENCE_LOOPBACK_GFX_CTX_0: - ret = _process_gfx_client_loopback(drv_data, client_id); - break; #if IS_ENABLED(CONFIG_DEBUG_FS) - case HW_FENCE_LOOPBACK_VAL_0: - case HW_FENCE_LOOPBACK_VAL_1: - case HW_FENCE_LOOPBACK_VAL_2: - case HW_FENCE_LOOPBACK_VAL_3: - case HW_FENCE_LOOPBACK_VAL_4: - case HW_FENCE_LOOPBACK_VAL_5: - case HW_FENCE_LOOPBACK_VAL_6: + case HW_FENCE_CLIENT_ID_VAL0: + case HW_FENCE_CLIENT_ID_VAL1: + case HW_FENCE_CLIENT_ID_VAL2: + case HW_FENCE_CLIENT_ID_VAL3: + case HW_FENCE_CLIENT_ID_VAL4: + case HW_FENCE_CLIENT_ID_VAL5: + case HW_FENCE_CLIENT_ID_VAL6: ret = process_validation_client_loopback(drv_data, client_id); break; #endif /* CONFIG_DEBUG_FS */ @@ -266,10 +197,10 @@ static int _process_doorbell_client(struct 
hw_fence_driver_data *drv_data, int c void hw_fence_utils_process_doorbell_mask(struct hw_fence_driver_data *drv_data, u64 db_flags) { - int client_id = HW_FENCE_LOOPBACK_DPU_CTL_0; + int client_id = HW_FENCE_CLIENT_ID_CTL0; u64 mask; - for (; client_id < HW_FENCE_LOOPBACK_MAX; client_id++) { + for (; client_id <= HW_FENCE_CLIENT_ID_VAL6; client_id++) { mask = 1 << client_id; if (mask & db_flags) { HWFNC_DBG_H("client_id:%d signaled! flags:0x%llx\n", client_id, db_flags); @@ -946,54 +877,6 @@ int hw_fence_utils_map_qtime(struct hw_fence_driver_data *drv_data) return ret; } -static int _map_ctl_start(struct hw_fence_driver_data *drv_data, u32 ctl_id, - void **iomem_ptr, uint32_t *iomem_size) -{ - u32 reg_config[2]; - void __iomem *ptr; - char name[30] = {0}; - int ret; - - snprintf(name, sizeof(name), "qcom,dpu-ctl-start-%d-reg", ctl_id); - ret = of_property_read_u32_array(drv_data->dev->of_node, name, reg_config, 2); - if (ret) - return 0; /* this is an optional property */ - - /* Mmap registers */ - ptr = devm_ioremap(drv_data->dev, reg_config[0], reg_config[1]); - if (!ptr) { - HWFNC_ERR("failed to ioremap %s reg\n", name); - return -ENOMEM; - } - - *iomem_ptr = ptr; - *iomem_size = reg_config[1]; - - HWFNC_DBG_INIT("mapped ctl_start ctl_id:%d name:%s address:0x%x size:0x%x io_mem:0x%pK\n", - ctl_id, name, reg_config[0], reg_config[1], ptr); - - return 0; -} - -int hw_fence_utils_map_ctl_start(struct hw_fence_driver_data *drv_data) -{ - u32 ctl_id = HW_FENCE_LOOPBACK_DPU_CTL_0; - - for (; ctl_id <= HW_FENCE_LOOPBACK_DPU_CTL_5; ctl_id++) { - if (_map_ctl_start(drv_data, ctl_id, &drv_data->ctl_start_ptr[ctl_id], - &drv_data->ctl_start_size[ctl_id])) { - HWFNC_ERR("cannot map ctl_start ctl_id:%d\n", ctl_id); - } else { - if (drv_data->ctl_start_ptr[ctl_id]) - HWFNC_DBG_INIT("mapped ctl_id:%d ctl_start_ptr:0x%pK size:%u\n", - ctl_id, drv_data->ctl_start_ptr[ctl_id], - drv_data->ctl_start_size[ctl_id]); - } - } - - return 0; -} - enum hw_fence_client_id 
hw_fence_utils_get_client_id_priv(struct hw_fence_driver_data *drv_data, enum hw_fence_client_id client_id) { From 26eb7e4268341a414042a2493ba53f0a220f8614 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 14 Mar 2023 16:40:55 -0700 Subject: [PATCH 064/166] mm-drivers: hw_fence: remove destroy fence and fence array ioctls Remove ioctls to destroy fence and fence array. Existing implementation fails to close file descriptors properly, and closing fds must be done by caller, not by HW Fence driver. Change-Id: I7a84c87475144f3e8a90acf44b7cf8678b6cc2dd Signed-off-by: Grace An --- hw_fence/src/hw_fence_ioctl.c | 70 ----------------------------------- 1 file changed, 70 deletions(-) diff --git a/hw_fence/src/hw_fence_ioctl.c b/hw_fence/src/hw_fence_ioctl.c index 456732d0db..5fa02ef489 100644 --- a/hw_fence/src/hw_fence_ioctl.c +++ b/hw_fence/src/hw_fence_ioctl.c @@ -22,12 +22,8 @@ #define HW_SYNC_IOC_UNREG_CLIENT _IOWR(HW_SYNC_IOC_MAGIC, 11, unsigned long) #define HW_SYNC_IOC_CREATE_FENCE _IOWR(HW_SYNC_IOC_MAGIC, 12,\ struct hw_fence_sync_create_data) -#define HW_SYNC_IOC_DESTROY_FENCE _IOWR(HW_SYNC_IOC_MAGIC, 13,\ - struct hw_fence_sync_create_data) #define HW_SYNC_IOC_CREATE_FENCE_ARRAY _IOWR(HW_SYNC_IOC_MAGIC, 14,\ struct hw_fence_array_sync_create_data) -#define HW_SYNC_IOC_DESTROY_FENCE_ARRAY _IOWR(HW_SYNC_IOC_MAGIC, 15,\ - struct hw_fence_array_sync_create_data) #define HW_SYNC_IOC_REG_FOR_WAIT _IOWR(HW_SYNC_IOC_MAGIC, 16, int) #define HW_SYNC_IOC_FENCE_SIGNAL _IOWR(HW_SYNC_IOC_MAGIC, 17, unsigned long) #define HW_SYNC_IOC_FENCE_WAIT _IOWR(HW_SYNC_IOC_MAGIC, 18, int) @@ -317,35 +313,6 @@ exit: return ret; } -static long hw_sync_ioctl_destroy_fence(struct hw_sync_obj *obj, unsigned long arg) -{ - int fd; - struct hw_dma_fence *fence; - struct hw_fence_sync_create_data data; - - if (!_is_valid_client(obj)) - return -EINVAL; - - if (copy_from_user(&data, (void __user *)arg, sizeof(data))) - return -EFAULT; - - fd = data.fence; - fence = (struct hw_dma_fence 
*)_hw_sync_get_fence(fd); - - if (!fence) { - HWFNC_ERR("fence for fd:%d not found\n", fd); - return -EINVAL; - } - - /* Decrement the refcount that hw_sync_get_fence increments */ - dma_fence_put(&fence->base); - - /* To destroy fence */ - dma_fence_put(&fence->base); - - return 0; -} - static void _put_child_fences(int i, struct dma_fence **fences) { int fence_idx; @@ -448,41 +415,6 @@ exit: return ret; } -static long hw_sync_ioctl_destroy_fence_array(struct hw_sync_obj *obj, unsigned long arg) -{ - struct dma_fence_array *fence_array; - struct dma_fence *fence; - struct hw_fence_array_sync_create_data data; - int fd; - - if (!_is_valid_client(obj)) - return -EINVAL; - - if (copy_from_user(&data, (void __user *)arg, sizeof(data))) - return -EFAULT; - - fd = data.fence_array_fd; - fence = (struct dma_fence *)_hw_sync_get_fence(fd); - if (!fence) { - HWFNC_ERR("Invalid fence fd: %d\n", fd); - return -EINVAL; - } - - /* Decrement the refcount that hw_sync_get_fence increments */ - dma_fence_put(fence); - - fence_array = to_dma_fence_array(fence); - if (!fence_array) { - HWFNC_ERR("Invalid fence array fd: %d\n", fd); - return -EINVAL; - } - - /* Destroy fence array */ - dma_fence_put(&fence_array->base); - - return 0; -} - /* * this IOCTL only supports receiving one fence as input-parameter, which can be * either a "dma_fence" or a "dma_fence_array", but eventually we would expand @@ -658,9 +590,7 @@ static const struct hw_sync_ioctl_def hw_sync_debugfs_ioctls[] = { HW_IOCTL_DEF(HW_SYNC_IOC_REG_CLIENT, hw_sync_ioctl_reg_client), HW_IOCTL_DEF(HW_SYNC_IOC_UNREG_CLIENT, hw_sync_ioctl_unreg_client), HW_IOCTL_DEF(HW_SYNC_IOC_CREATE_FENCE, hw_sync_ioctl_create_fence), - HW_IOCTL_DEF(HW_SYNC_IOC_DESTROY_FENCE, hw_sync_ioctl_destroy_fence), HW_IOCTL_DEF(HW_SYNC_IOC_CREATE_FENCE_ARRAY, hw_sync_ioctl_create_fence_array), - HW_IOCTL_DEF(HW_SYNC_IOC_DESTROY_FENCE_ARRAY, hw_sync_ioctl_destroy_fence_array), HW_IOCTL_DEF(HW_SYNC_IOC_REG_FOR_WAIT, hw_sync_ioctl_reg_for_wait), 
HW_IOCTL_DEF(HW_SYNC_IOC_FENCE_SIGNAL, hw_sync_ioctl_fence_signal), HW_IOCTL_DEF(HW_SYNC_IOC_FENCE_WAIT, hw_sync_ioctl_fence_wait), From 00044be1d41e08ea1763856b8a51b59df1d988e4 Mon Sep 17 00:00:00 2001 From: Grace An Date: Mon, 20 Mar 2023 16:37:35 -0700 Subject: [PATCH 065/166] mm-drivers: hw_fence: allow synx create and import with more flags Synx clients may specify additional flags (e.g. flags to specify a global fence) when creating or importing hwfence-backed synx objects. Ensure HW Fence Driver support of these flags. Change-Id: I38d94875be09da3506d3939077099c05fa9235f6 Signed-off-by: Grace An --- hw_fence/src/msm_hw_fence_synx_translation.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c index f35bfcd488..6970eb4d60 100644 --- a/hw_fence/src/msm_hw_fence_synx_translation.c +++ b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -169,7 +169,9 @@ int synx_hwfence_create(struct synx_session *session, struct synx_create_params return -SYNX_INVALID; } - if (IS_ERR_OR_NULL(params->h_synx) || (params->flags != SYNX_CREATE_DMA_FENCE) || + if (IS_ERR_OR_NULL(params->h_synx) || (params->flags > SYNX_CREATE_MAX_FLAGS) || + !(params->flags & SYNX_CREATE_DMA_FENCE) || + (params->flags & SYNX_CREATE_CSL_FENCE) || IS_ERR_OR_NULL(params->fence)) { HWFNC_ERR("synx_id:%d invalid create params h_synx:0x%pK flags:0x%x fence:0x%pK\n", session->type, params->h_synx, params->flags, params->fence); @@ -259,7 +261,8 @@ static int synx_hwfence_import_indv(void *client, struct synx_import_indv_params if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(params) || IS_ERR_OR_NULL(params->new_h_synx) || - (params->flags != SYNX_IMPORT_DMA_FENCE) || IS_ERR_OR_NULL(params->fence)) { + !(params->flags & SYNX_IMPORT_DMA_FENCE) || + (params->flags & SYNX_IMPORT_SYNX_FENCE) || IS_ERR_OR_NULL(params->fence)) { HWFNC_ERR("invalid client:0x%pK params:0x%pK h_synx:0x%pK flags:0x%x 
fence:0x%pK\n", client, params, IS_ERR_OR_NULL(params) ? NULL : params->new_h_synx, IS_ERR_OR_NULL(params) ? 0 : params->flags, From fc0379a5784c4ce5ddf8ec1899ccd9290864d01a Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 7 Mar 2023 14:00:49 -0800 Subject: [PATCH 066/166] mm-drivers: hw_fence: Rename RM APIs As we are merging upstream patches, resolve conflicts of namespaces in downstream modules. Change-Id: I2aa4b0f2cea859cddd2fb537fce7a6908999e7d4 Signed-off-by: Prakruthi Deepak Heragu Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_utils.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index bca3b959c5..c615adda29 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -314,8 +314,13 @@ static int hw_fence_gunyah_share_mem(struct hw_fence_driver_data *drv_data, sgl->sgl_entries[0].ipa_base = drv_data->res.start; sgl->sgl_entries[0].size = resource_size(&drv_data->res); +#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) + ret = ghd_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, 0, drv_data->label, + acl, sgl, NULL, &drv_data->memparcel); +#else ret = gh_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, 0, drv_data->label, acl, sgl, NULL, &drv_data->memparcel); +#endif if (ret) { HWFNC_ERR("%s: gh_rm_mem_share failed addr=%x size=%u err=%d\n", __func__, drv_data->res.start, drv_data->size, ret); @@ -350,11 +355,19 @@ static int hw_fence_rm_cb(struct notifier_block *nb, unsigned long cmd, void *da vm_status_payload->vm_status != GH_RM_VM_STATUS_RESET) goto end; +#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) + if (ghd_rm_get_vmid(drv_data->peer_name, &peer_vmid)) + goto end; + + if (ghd_rm_get_vmid(GH_PRIMARY_VM, &self_vmid)) + goto end; +#else if (gh_rm_get_vmid(drv_data->peer_name, &peer_vmid)) goto end; if (gh_rm_get_vmid(GH_PRIMARY_VM, &self_vmid)) goto end; +#endif if (peer_vmid != vm_status_payload->vmid) goto end; From 
cda6ac87d1b793e98add774c83468713b1780834 Mon Sep 17 00:00:00 2001 From: Grace An Date: Fri, 10 Feb 2023 17:26:22 -0800 Subject: [PATCH 067/166] mm-drivers: hw_fence: add support to read hw fence ctl events Add support to read hw fence ctl events through debugfs node from the carved out memory region shared with Fence Controller. Change-Id: I508695efcb8c7aa8fab9db2086af1ec1ff0ddd84 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 26 +++++ hw_fence/include/hw_fence_drv_utils.h | 4 +- hw_fence/src/hw_fence_drv_debug.c | 135 ++++++++++++++++++++++++++ hw_fence/src/hw_fence_drv_priv.c | 26 +++++ hw_fence/src/hw_fence_drv_utils.c | 26 +++++ 5 files changed, 216 insertions(+), 1 deletion(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index f934017749..359347b171 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -72,6 +72,12 @@ */ #define HW_FENCE_PAYLOAD_REV(major, minor) (major << 8 | (minor & 0xFF)) +/** + * HW_FENCE_EVENT_MAX_DATA: + * Maximum data that can be added to the debug event + */ +#define HW_FENCE_EVENT_MAX_DATA 12 + enum hw_fence_lookup_ops { HW_FENCE_LOOKUP_OP_CREATE = 0x1, HW_FENCE_LOOKUP_OP_DESTROY, @@ -265,6 +271,8 @@ struct hw_fence_client_queue_desc { * @clients_num: number of supported hw fence clients (configured based on device-tree) * @hw_fences_tbl: pointer to the hw-fences table * @hw_fences_tbl_cnt: number of elements in the hw-fence table + * @events: start address of hw fence debug events + * @total_events: total number of hw fence debug events supported * @client_lock_tbl: pointer to the per-client locks table * @client_lock_tbl_cnt: number of elements in the locks table * @hw_fences_mem_desc: memory descriptor for the hw-fence table @@ -320,6 +328,10 @@ struct hw_fence_driver_data { struct msm_hw_fence *hw_fences_tbl; u32 hw_fences_tbl_cnt; + /* events */ + struct msm_hw_fence_event *events; + u32 total_events; + /* Table with a 
Per-Client Lock */ u64 *client_lock_tbl; u32 client_lock_tbl_cnt; @@ -408,6 +420,20 @@ struct msm_hw_fence_queue_payload { u32 reserve; }; +/** + * struct msm_hw_fence_event - hardware fence ctl debug event + * time: qtime when the event is logged + * cpu: cpu id where the event is logged + * data_cnt: count of valid data available in the data field + * data: debug data logged by the event + */ +struct msm_hw_fence_event { + u64 time; + u32 cpu; + u32 data_cnt; + u32 data[HW_FENCE_EVENT_MAX_DATA]; +}; + /** * struct msm_hw_fence - structure holding each hw fence data. * @valid: field updated when a hw-fence is reserved. True if hw-fence is in use diff --git a/hw_fence/include/hw_fence_drv_utils.h b/hw_fence/include/hw_fence_drv_utils.h index 9063385a23..43871ee571 100644 --- a/hw_fence/include/hw_fence_drv_utils.h +++ b/hw_fence/include/hw_fence_drv_utils.h @@ -30,12 +30,14 @@ * HW_FENCE_MEM_RESERVE_LOCKS_REGION: Reserve memory for the per-client locks memory region. * HW_FENCE_MEM_RESERVE_TABLE: Reserve memory for the hw-fences global table. * HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: Reserve memory per-client for the rx/tx queues. 
+ * HW_FENCE_MEM_RESERVE_EVENTS_BUFF: Reserve memory for the debug events */ enum hw_fence_mem_reserve { HW_FENCE_MEM_RESERVE_CTRL_QUEUE, HW_FENCE_MEM_RESERVE_LOCKS_REGION, HW_FENCE_MEM_RESERVE_TABLE, - HW_FENCE_MEM_RESERVE_CLIENT_QUEUE + HW_FENCE_MEM_RESERVE_CLIENT_QUEUE, + HW_FENCE_MEM_RESERVE_EVENTS_BUFF }; /** diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index 46eb3e4abe..28fb37cccc 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -14,6 +14,11 @@ #define HW_FENCE_DEBUG_MAX_LOOPS 200 +/* event dump data includes one "32-bit" element + "|" separator */ +#define HW_FENCE_MAX_DATA_PER_EVENT_DUMP (HW_FENCE_EVENT_MAX_DATA * 9) + +#define HFENCE_EVT_MSG "[%d][cpu:%d][%lu] data[%d]:%s\n" + u32 msm_hw_fence_debug_level = HW_FENCE_PRINTK; /** @@ -540,6 +545,129 @@ static int dump_full_table(struct hw_fence_driver_data *drv_data, char *buf, u32 return len; } +static inline int _dump_event(struct msm_hw_fence_event *event, char *buf, int len, int max_size, + u32 index) +{ + char data[HW_FENCE_MAX_DATA_PER_EVENT_DUMP]; + u32 data_cnt; + int i, tmp_len = 0, ret = 0; + + if (!event->time) + return 0; + + memset(&data, 0, sizeof(data)); + if (event->data_cnt > HW_FENCE_EVENT_MAX_DATA) { + HWFNC_ERR("event[%d] has invalid data_cnt:%lu greater than max_data_cnt:%lu\n", + index, event->data_cnt, HW_FENCE_EVENT_MAX_DATA); + data_cnt = HW_FENCE_EVENT_MAX_DATA; + } else { + data_cnt = event->data_cnt; + } + + for (i = 0; i < data_cnt; i++) + tmp_len += scnprintf(data + tmp_len, HW_FENCE_MAX_DATA_PER_EVENT_DUMP - tmp_len, + "%lx|", event->data[i]); + + ret = scnprintf(buf + len, max_size - len, HFENCE_EVT_MSG, index, event->cpu, event->time, + event->data_cnt, data); + + HWFNC_DBG_INFO(HFENCE_EVT_MSG, index, event->cpu, event->time, event->data_cnt, data); + + return ret; +} + +/** + * hw_fence_dbg_dump_events_rd() - debugfs read to dump the fctl events. + * @file: file handler. 
+ * @user_buf: user buffer content for debugfs. + * @user_buf_size: size of the user buffer. + * @ppos: position offset of the user buffer. + */ +static ssize_t hw_fence_dbg_dump_events_rd(struct file *file, char __user *user_buf, + size_t user_buf_size, loff_t *ppos) +{ + struct hw_fence_driver_data *drv_data; + u32 entry_size = sizeof(struct msm_hw_fence_event), max_size = SZ_4K; + char *buf = NULL; + int len = 0; + static u64 start_time; + static int index, start_index; + static bool wraparound; + + if (!file || !file->private_data) { + HWFNC_ERR("unexpected data %d\n", file); + return -EINVAL; + } + drv_data = file->private_data; + + if (!drv_data->events) { + HWFNC_ERR("events not supported\n"); + return -EINVAL; + } + + if (wraparound && index >= start_index) { + HWFNC_DBG_H("no more data index:%d total_events:%d\n", index, + drv_data->total_events); + start_time = 0; + index = 0; + wraparound = false; + return 0; + } + + if (user_buf_size < entry_size) { + HWFNC_ERR("Not enough buff size:%d to dump entries:%d\n", user_buf_size, + entry_size); + return -EINVAL; + } + + buf = kzalloc(max_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* find index of earliest event */ + if (!start_time) { + mb(); /* make sure data is ready before read */ + for (index = 0; index < drv_data->total_events; index++) { + u64 time = drv_data->events[index].time; + + if (time && (!start_time || time < start_time)) { + start_time = time; + start_index = index; + } + } + index = start_index; + HWFNC_DBG_H("events:0x%pK start_index:%d start_time:%llu total_events:%d\n", + drv_data->events, start_index, start_time, drv_data->total_events); + } + + HWFNC_DBG_H("++ dump_events index:%d qtime:%llu\n", index, hw_fence_get_qtime(drv_data)); + while ((!wraparound || index < start_index) && len < (max_size - entry_size)) { + len += _dump_event(&drv_data->events[index], buf, len, max_size, index); + index++; + if (index >= drv_data->total_events) { + index = 0; + wraparound = true; + } + } 
+ HWFNC_DBG_H("-- dump_events: index:%d qtime:%llu\n", index, hw_fence_get_qtime(drv_data)); + + if (len <= 0 || len > user_buf_size) { + HWFNC_ERR("len:%d invalid buff size:%d\n", len, user_buf_size); + len = 0; + goto exit; + } + + if (copy_to_user(user_buf, buf, len)) { + HWFNC_ERR("failed to copy to user!\n"); + len = -EFAULT; + goto exit; + } + *ppos += len; +exit: + kfree(buf); + return len; +} + /** * hw_fence_dbg_dump_queues_wr() - debugfs wr to dump the hw-fences queues. * @file: file handler. @@ -955,6 +1083,11 @@ static const struct file_operations hw_fence_dump_queues_fops = { .write = hw_fence_dbg_dump_queues_wr, }; +static const struct file_operations hw_fence_dump_events_fops = { + .open = simple_open, + .read = hw_fence_dbg_dump_events_rd, +}; + static const struct file_operations hw_fence_create_join_fence_fops = { .open = simple_open, .write = hw_fence_dbg_create_join_fence, @@ -1004,6 +1137,8 @@ int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data) debugfs_create_file("hw_sync", 0600, debugfs_root, NULL, &hw_sync_debugfs_fops); debugfs_create_u64("hw_fence_lock_wake_cnt", 0600, debugfs_root, &drv_data->debugfs_data.lock_wake_cnt); + debugfs_create_file("hw_fence_dump_events", 0600, debugfs_root, drv_data, + &hw_fence_dump_events_fops); return 0; } diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index e59162b1f5..8fba461b66 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -502,6 +502,27 @@ static int init_hw_fences_table(struct hw_fence_driver_data *drv_data) return 0; } +static int init_hw_fences_events(struct hw_fence_driver_data *drv_data) +{ + phys_addr_t phys; + void *ptr; + u32 size; + int ret; + + ret = hw_fence_utils_reserve_mem(drv_data, HW_FENCE_MEM_RESERVE_EVENTS_BUFF, &phys, &ptr, + &size, 0); + if (ret) { + HWFNC_DBG_INFO("Failed to reserve events buffer %d\n", ret); + return -ENOMEM; + } + drv_data->events = (struct msm_hw_fence_event *)ptr; 
+ drv_data->total_events = size / sizeof(struct msm_hw_fence_event); + HWFNC_DBG_INIT("events:0x%pK total_events:%u event_sz:%u total_size:%u\n", drv_data->events, + drv_data->total_events, sizeof(struct msm_hw_fence_event), size); + + return 0; +} + static int init_ctrl_queue(struct hw_fence_driver_data *drv_data) { struct msm_hw_fence_mem_addr *mem_descriptor; @@ -553,6 +574,11 @@ int hw_fence_init(struct hw_fence_driver_data *drv_data) if (ret) goto exit; + /* Initialize event log */ + ret = init_hw_fences_events(drv_data); + if (ret) + HWFNC_DBG_INFO("Unable to init events\n"); + /* Map ipcc registers */ ret = hw_fence_utils_map_ipcc(drv_data); if (ret) { diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index c615adda29..98033e06ee 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -56,6 +56,12 @@ */ #define HW_FENCE_LOOPBACK_CLIENTS_MASK 0x7fff +/** + * HW_FENCE_MAX_EVENTS: + * Maximum number of HW Fence debug events + */ +#define HW_FENCE_MAX_EVENTS 1000 + /** * struct hw_fence_client_types - Table describing all supported client types, used to parse * device-tree properties related to client queue size. 
@@ -472,6 +478,8 @@ char *_get_mem_reserve_type(enum hw_fence_mem_reserve type) return "HW_FENCE_MEM_RESERVE_TABLE"; case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: return "HW_FENCE_MEM_RESERVE_CLIENT_QUEUE"; + case HW_FENCE_MEM_RESERVE_EVENTS_BUFF: + return "HW_FENCE_MEM_RESERVE_EVENTS_BUFF"; } return "Unknown"; @@ -483,6 +491,8 @@ int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data, { int ret = 0; u32 start_offset = 0; + u32 remaining_size_bytes; + u32 total_events; switch (type) { case HW_FENCE_MEM_RESERVE_CTRL_QUEUE: @@ -512,6 +522,22 @@ int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data, start_offset = drv_data->hw_fence_client_queue_size[client_id].start_offset; *size = drv_data->hw_fence_client_queue_size[client_id].type->mem_size; break; + case HW_FENCE_MEM_RESERVE_EVENTS_BUFF: + start_offset = drv_data->used_mem_size; + remaining_size_bytes = drv_data->size - start_offset; + if (start_offset >= drv_data->size || + remaining_size_bytes < sizeof(struct msm_hw_fence_event)) { + HWFNC_DBG_INFO("no space for events total_sz:%lu offset:%lu evt_sz:%lu\n", + drv_data->size, start_offset, sizeof(struct msm_hw_fence_event)); + ret = -ENOMEM; + goto exit; + } + + total_events = remaining_size_bytes / sizeof(struct msm_hw_fence_event); + if (total_events > HW_FENCE_MAX_EVENTS) + total_events = HW_FENCE_MAX_EVENTS; + *size = total_events * sizeof(struct msm_hw_fence_event); + break; default: HWFNC_ERR("Invalid mem reserve type:%d\n", type); ret = -EINVAL; From 6624a5c4539fc5143987ae4b70163eec26f93fe6 Mon Sep 17 00:00:00 2001 From: Varsha Suresh Date: Wed, 7 Dec 2022 11:19:11 -0800 Subject: [PATCH 068/166] mm-drivers: disp: Add support for Bazel build system - add support to build mm-drivers module using DDK framework for pineapple - add macro that makes it easy to register new modules Change-Id: I704bbe946f4d1053a85bfb122408c201b0f155b2 Signed-off-by: Varsha Suresh --- BUILD.bazel | 36 ++++++++++++++++ mm_module_build.bzl | 103 
++++++++++++++++++++++++++++++++++++++++++++ mm_modules.bzl | 44 +++++++++++++++++++ target.bzl | 16 +++++++ 4 files changed, 199 insertions(+) create mode 100644 BUILD.bazel create mode 100644 mm_module_build.bzl create mode 100644 mm_modules.bzl create mode 100644 target.bzl diff --git a/BUILD.bazel b/BUILD.bazel new file mode 100644 index 0000000000..77944804ce --- /dev/null +++ b/BUILD.bazel @@ -0,0 +1,36 @@ +load("//build/kernel/kleaf:kernel.bzl", "ddk_headers") + +package( + default_visibility = [ + "//visibility:public"], +) + +ddk_headers( + name = "mm_drivers_configs", + hdrs = glob([ + "config/*.h"]), + includes = ["config"] +) + +ddk_headers( + name = "hw_fence_headers", + hdrs = glob([ + "hw_fence/include/*.h"]), + includes = ["hw_fence/include"] +) + +ddk_headers( + name = "sync_fence_uapi_headers", + hdrs = glob([ + "sync_fence/include/uapi/sync_fence/*.h", + "sync_fence/include/*.h"]), + includes = ["sync_fence/include"] +) + +ddk_headers( + name = "mm_drivers_headers", + hdrs = [":mm_drivers_configs", ":hw_fence_headers", ":sync_fence_uapi_headers"] +) + +load(":target.bzl", "define_pineapple") +define_pineapple() \ No newline at end of file diff --git a/mm_module_build.bzl b/mm_module_build.bzl new file mode 100644 index 0000000000..dc708705b5 --- /dev/null +++ b/mm_module_build.bzl @@ -0,0 +1,103 @@ +load("//build/kernel/kleaf:kernel.bzl", "ddk_module","ddk_submodule") +load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir") +load("//msm-kernel:target_variants.bzl", "get_all_variants") + +def _register_module_to_map(module_map, name, path, config_option, srcs, config_srcs, deps): + processed_config_srcs = {} + + for config_src_name in config_srcs: + config_src = config_srcs[config_src_name] + + if type(config_src) == "list": + processed_config_srcs[config_src_name] = {True: config_src} + else: + processed_config_srcs[config_src_name] = config_src + + module = struct( + name = name, + path = path, + srcs = srcs, + config_srcs = 
processed_config_srcs, + config_option = config_option, + deps = deps, + ) + + module_map[name] = module + +def _get_config_choices(map, options): + choices = [] + for option in map: + choices.extend(map[option].get(option in options,[])) + return choices + +def _get_kernel_build_options(modules, config_options): + all_options = {option: True for option in config_options} + all_options = all_options | {module.config_option: True for module in modules if module.config_option} + return all_options + +def _get_kernel_build_module_srcs(module, options, formatter): + srcs = module.srcs + _get_config_choices(module.config_srcs, options) + print("-",module.name,",",module.config_option,",srcs =",srcs) + module_path = "{}/".format(module.path) if module.path else "" + return ["{}{}".format(module_path, formatter(src)) for src in srcs] + +def _get_kernel_build_module_deps(module, options, formatter): + return [formatter(dep) for dep in module.deps] + +def mm_driver_module_entry(hdrs = []): + module_map = {} + + def register(name, path = None, config_option = None, srcs = [], config_srcs = {}, deps =[]): + _register_module_to_map(module_map, name, path, config_option, srcs, config_srcs, deps) + return struct( + register = register, + get = module_map.get, + hdrs = hdrs, + module_map = module_map + ) + +def define_target_variant_modules(target, variant, registry, modules, config_options = []): + kernel_build = "{}_{}".format(target, variant) + kernel_build_label = "//msm-kernel:{}".format(kernel_build) + modules = [registry.get(module_name) for module_name in modules] + options = _get_kernel_build_options(modules, config_options) + build_print = lambda message : print("{}: {}".format(kernel_build, message)) + formatter = lambda s : s.replace("%b", kernel_build).replace("%t", target) + headers = ["//msm-kernel:all_headers"] + registry.hdrs + all_module_rules = [] + + for module in modules: + rule_name = "{}_{}".format(kernel_build, module.name) + module_srcs = 
_get_kernel_build_module_srcs(module, options, formatter) + + if not module_srcs: + continue + + ddk_submodule( + name = rule_name, + srcs = module_srcs, + out = "{}.ko".format(module.name), + deps = headers + _get_kernel_build_module_deps(module, options, formatter), + local_defines = options.keys(), + ) + all_module_rules.append(rule_name) + + ddk_module( + name = "{}_mm_drivers".format(kernel_build), + kernel_build = kernel_build_label, + deps = all_module_rules, + ) + copy_to_dist_dir( + name = "{}_mm_drivers_dist".format(kernel_build), + data = [":{}_mm_drivers".format(kernel_build)], + dist_dir = "out/target/product/{}/dlkm/lib/modules".format(target), + flat = True, + wipe_dist_dir = False, + allow_duplicate_filenames = False, + mode_overrides = {"**/*": "644"}, + log = "info", + ) + +def define_consolidate_gki_modules(target, registry, modules, config_options = []): + for (targets, variant) in get_all_variants(): + define_target_variant_modules(targets, variant, registry, modules, config_options) \ No newline at end of file diff --git a/mm_modules.bzl b/mm_modules.bzl new file mode 100644 index 0000000000..ef8b175e79 --- /dev/null +++ b/mm_modules.bzl @@ -0,0 +1,44 @@ +load(":mm_module_build.bzl", "mm_driver_module_entry") + +HW_FENCE_PATH = "hw_fence" +MSM_EXT_DISPLAY_PATH = "msm_ext_display" +SYNC_FENCE_PATH = "sync_fence" + +mm_driver_modules = mm_driver_module_entry([":mm_drivers_headers"]) +module_entry = mm_driver_modules.register + +#--------------- MM-DRIVERS MODULES ------------------ + +module_entry( + name = "hw_fence", + path = HW_FENCE_PATH + "/src", + config_option = "CONFIG_QTI_HW_FENCE", + config_srcs = { + "CONFIG_DEBUG_FS" : [ + "hw_fence_ioctl.c", + ] + }, + srcs = ["hw_fence_drv_debug.c", + "hw_fence_drv_ipc.c", + "hw_fence_drv_priv.c", + "hw_fence_drv_utils.c", + "msm_hw_fence.c", + "msm_hw_fence_synx_translation.c"], + deps =[ + "//vendor/qcom/opensource/synx-kernel:synx_headers" + ] +) + +module_entry( + name = "msm_ext_display", + 
path = MSM_EXT_DISPLAY_PATH + "/src", + config_option = "CONFIG_MSM_EXT_DISPLAY", + srcs = ["msm_ext_display.c"], +) + +module_entry( + name = "sync_fence", + path = SYNC_FENCE_PATH + "/src", + config_option = "CONFIG_QCOM_SPEC_SYNC", + srcs = ["qcom_sync_file.c"], +) \ No newline at end of file diff --git a/target.bzl b/target.bzl new file mode 100644 index 0000000000..6d63bab3c8 --- /dev/null +++ b/target.bzl @@ -0,0 +1,16 @@ +load(":mm_modules.bzl", "mm_driver_modules") +load(":mm_module_build.bzl", "define_consolidate_gki_modules") + +def define_pineapple(): + define_consolidate_gki_modules( + target = "pineapple", + registry = mm_driver_modules, + modules = [ + "hw_fence", + "msm_ext_display", + "sync_fence", + ], + config_options = [ + "CONFIG_DEBUG_FS", + ], +) \ No newline at end of file From 6db4e6a849902c15e62b72675b201eb2a226756d Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Mon, 10 Apr 2023 16:34:17 -0700 Subject: [PATCH 069/166] mm-drivers: hw_fence: resolve compilation errors for kalama Fix compilation errors for kalama target, where cpusys vm share memory driver is not present. 
Change-Id: I4f7762ad747490ba166f8e9ae27dd0191de3f021 Signed-off-by: Ingrid Gallardo --- hw_fence/src/hw_fence_drv_utils.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 9162cc7eea..9e8629b428 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -10,7 +10,9 @@ #include #include #include +#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) #include +#endif #include #include "hw_fence_drv_priv.h" @@ -343,6 +345,15 @@ static int hw_fence_gunyah_share_mem(struct hw_fence_driver_data *drv_data, return ret; } +static int _is_mem_shared(struct resource *res) +{ +#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) + return gh_cpusys_vm_get_share_mem_info(res); +#else + return -EINVAL; +#endif +} + static int hw_fence_rm_cb(struct notifier_block *nb, unsigned long cmd, void *data) { struct gh_rm_notif_vm_status_payload *vm_status_payload; @@ -383,7 +394,7 @@ static int hw_fence_rm_cb(struct notifier_block *nb, unsigned long cmd, void *da switch (vm_status_payload->vm_status) { case GH_RM_VM_STATUS_READY: - ret = gh_cpusys_vm_get_share_mem_info(&res); + ret = _is_mem_shared(&res); if (ret) { HWFNC_DBG_INIT("mem not shared ret:%d, attempt share\n", ret); if (hw_fence_gunyah_share_mem(drv_data, self_vmid, peer_vmid)) From 2f76940f774c193f8cf52ea69ce0897f28d46fe5 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 11 Apr 2023 13:42:27 -0700 Subject: [PATCH 070/166] mm-drivers: hw_fence: add check for invalid client_id param in ioctl Current implementation allows ioctl to deregister hw-fence client with client_id that does not match hw_sync_obj. This can cause a double-free if user-space deregisters the wrong file descriptor by mistake. Instead, fail the ioctl early if it has these invalid parameters. 
Change-Id: Ib781be18d2f71c24d6aa4fc08eeba44649da13da Signed-off-by: Grace An --- hw_fence/src/hw_fence_ioctl.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/hw_fence/src/hw_fence_ioctl.c b/hw_fence/src/hw_fence_ioctl.c index 5fa02ef489..d7eab54fee 100644 --- a/hw_fence/src/hw_fence_ioctl.c +++ b/hw_fence/src/hw_fence_ioctl.c @@ -213,8 +213,13 @@ static long hw_sync_ioctl_unreg_client(struct hw_sync_obj *obj, unsigned long ar { int client_id = _get_client_id(obj, arg); - if (IS_ERR(&client_id)) + if (IS_ERR(&client_id)) { return client_id; + } else if (client_id != obj->client_id) { + HWFNC_ERR("deregistering hw-fence client %d with invalid client_id arg:%d\n", + obj->client_id, client_id); + return -EINVAL; + } return msm_hw_fence_deregister(obj->client_handle); } From 2a557e339bf966d818de74d55ead4a3b8a65f7b9 Mon Sep 17 00:00:00 2001 From: Grace An Date: Thu, 2 Mar 2023 12:41:51 -0800 Subject: [PATCH 071/166] mm-drivers: hw_fence: add interface to dump debug data Add interfaces to support dumping debug data. These interfaces should be used by drivers in case of a hw-fence error detected. Change-Id: Iab46c8e9dea8ffead06f192c8d01182912fffcce Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_debug.h | 12 +- hw_fence/src/hw_fence_drv_debug.c | 276 ++++++++++++++++++-------- hw_fence/src/msm_hw_fence.c | 69 +++++++ 3 files changed, 272 insertions(+), 85 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_debug.h b/hw_fence/include/hw_fence_drv_debug.h index a1d66e0cdd..b6f6f14e19 100644 --- a/hw_fence/include/hw_fence_drv_debug.h +++ b/hw_fence/include/hw_fence_drv_debug.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef __HW_FENCE_DRV_DEBUG @@ -60,6 +60,9 @@ extern u32 msm_hw_fence_debug_level; #define HWFNC_DBG_LOCK(fmt, ...) 
\ dprintk(HW_FENCE_LOCK, "[hwfence:%s:%d][dbglock]"fmt, __func__, __LINE__, ##__VA_ARGS__) +#define HWFNC_DBG_DUMP(prio, fmt, ...) \ + dprintk(prio, "[hwfence:%s:%d][dbgd]"fmt, __func__, __LINE__, ##__VA_ARGS__) + #define HWFNC_WARN(fmt, ...) \ pr_warn("[hwfence:%s:%d][warn][%pS] "fmt, __func__, __LINE__, \ __builtin_return_address(0), ##__VA_ARGS__) @@ -70,6 +73,13 @@ int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data); int process_validation_client_loopback(struct hw_fence_driver_data *drv_data, int client_id); +void hw_fence_debug_dump_queues(enum hw_fence_drv_prio prio, + struct msm_hw_fence_client *hw_fence_client); +void hw_fence_debug_dump_fence(enum hw_fence_drv_prio prio, struct msm_hw_fence *hw_fence, u64 hash, + u32 count); +void hw_fence_debug_dump_table(enum hw_fence_drv_prio prio, struct hw_fence_driver_data *drv_data); +void hw_fence_debug_dump_events(enum hw_fence_drv_prio prio, struct hw_fence_driver_data *drv_data); + extern const struct file_operations hw_sync_debugfs_fops; struct hw_fence_out_clients_map { diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index 28fb37cccc..405d0c0681 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -14,6 +14,14 @@ #define HW_FENCE_DEBUG_MAX_LOOPS 200 +#define HFENCE_TBL_MSG \ + "[%d]hfence[%d] v:%d err:%lu ctx:%llu seq:%llu wait:0x%llx alloc:%d f:0x%llx child_cnt:%d" \ + "%s ct:%llu tt:%llu wt:%llu\n" + +/* each hwfence parent includes one "32-bit" element + "," separator */ +#define HW_FENCE_MAX_PARENTS_SUBLIST_DUMP (MSM_HW_FENCE_MAX_JOIN_PARENTS * 9) +#define HW_FENCE_MAX_PARENTS_DUMP (sizeof("parent_list[] ") + HW_FENCE_MAX_PARENTS_SUBLIST_DUMP) + /* event dump data includes one "32-bit" element + "|" separator */ #define HW_FENCE_MAX_DATA_PER_EVENT_DUMP (HW_FENCE_EVENT_MAX_DATA * 9) @@ -473,29 +481,82 @@ static ssize_t hw_fence_dbg_create_wr(struct file *file, return count; } -#define HFENCE_TBL_MSG \ - 
"[%d]hfence[%d] v:%d err:%d ctx:%d seqno:%d wait:0x%llx alloc:%d f:0x%lx tt:%llu wt:%llu\n" +static void _dump_fence_helper(enum hw_fence_drv_prio prio, struct msm_hw_fence *hw_fence, + char *parents_dump, u64 hash, u32 count) +{ + char sublist[HW_FENCE_MAX_PARENTS_SUBLIST_DUMP]; + u32 parents_cnt; + int i, len = 0; + + if (!hw_fence || !parents_dump) { + HWFNC_ERR("invalid params hw_fence:0x%pK parents_dump:0x%pK\n", hw_fence, + parents_dump); + return; + } + + memset(parents_dump, 0, sizeof(char) * HW_FENCE_MAX_PARENTS_DUMP); + if (hw_fence->parents_cnt) { + if (hw_fence->parents_cnt > MSM_HW_FENCE_MAX_JOIN_PARENTS) { + HWFNC_ERR("hfence[%d] has invalid parents_cnt:%d greater than max:%d\n", + hash, hw_fence->parents_cnt, MSM_HW_FENCE_MAX_JOIN_PARENTS); + parents_cnt = MSM_HW_FENCE_MAX_JOIN_PARENTS; + } else { + parents_cnt = hw_fence->parents_cnt; + } + + memset(sublist, 0, sizeof(sublist)); + for (i = 0; i < parents_cnt; i++) + len += scnprintf(sublist + len, HW_FENCE_MAX_PARENTS_SUBLIST_DUMP - len, + "%lu,", hw_fence->parent_list[i]); + scnprintf(parents_dump, HW_FENCE_MAX_PARENTS_DUMP, " p:[%s]", sublist); + } + + HWFNC_DBG_DUMP(prio, HFENCE_TBL_MSG, + count, hash, hw_fence->valid, hw_fence->error, hw_fence->ctx_id, hw_fence->seq_id, + hw_fence->wait_client_mask, hw_fence->fence_allocator, hw_fence->flags, + hw_fence->pending_child_cnt, parents_dump, hw_fence->fence_create_time, + hw_fence->fence_trigger_time, hw_fence->fence_wait_time); +} + +void hw_fence_debug_dump_fence(enum hw_fence_drv_prio prio, struct msm_hw_fence *hw_fence, u64 hash, + u32 count) +{ + char parents_dump[HW_FENCE_MAX_PARENTS_DUMP]; + + return _dump_fence_helper(prio, hw_fence, parents_dump, hash, count); +} static inline int _dump_fence(struct msm_hw_fence *hw_fence, char *buf, int len, int max_size, u32 index, u32 cnt) { int ret; + char parents_dump[HW_FENCE_MAX_PARENTS_DUMP]; + + _dump_fence_helper(HW_FENCE_INFO, hw_fence, parents_dump, index, cnt); ret = scnprintf(buf + len, 
max_size - len, HFENCE_TBL_MSG, - cnt, index, hw_fence->valid, hw_fence->error, - hw_fence->ctx_id, hw_fence->seq_id, - hw_fence->wait_client_mask, hw_fence->fence_allocator, - hw_fence->flags, hw_fence->fence_trigger_time, hw_fence->fence_wait_time); - - HWFNC_DBG_L(HFENCE_TBL_MSG, - cnt, index, hw_fence->valid, hw_fence->error, - hw_fence->ctx_id, hw_fence->seq_id, - hw_fence->wait_client_mask, hw_fence->fence_allocator, - hw_fence->flags, hw_fence->fence_trigger_time, hw_fence->fence_wait_time); + cnt, index, hw_fence->valid, hw_fence->error, hw_fence->ctx_id, hw_fence->seq_id, + hw_fence->wait_client_mask, hw_fence->fence_allocator, hw_fence->flags, + hw_fence->pending_child_cnt, parents_dump, hw_fence->fence_create_time, + hw_fence->fence_trigger_time, hw_fence->fence_wait_time); return ret; } +void hw_fence_debug_dump_table(enum hw_fence_drv_prio prio, struct hw_fence_driver_data *drv_data) +{ + u32 i, cnt = 0; + struct msm_hw_fence *hw_fence; + + for (i = 0; i < drv_data->hw_fences_tbl_cnt; i++) { + hw_fence = &drv_data->hw_fences_tbl[i]; + if (!hw_fence->valid) + continue; + hw_fence_debug_dump_fence(prio, hw_fence, i, cnt); + cnt++; + } +} + static int dump_single_entry(struct hw_fence_driver_data *drv_data, char *buf, u32 *index, int max_size) { @@ -545,17 +606,40 @@ static int dump_full_table(struct hw_fence_driver_data *drv_data, char *buf, u32 return len; } -static inline int _dump_event(struct msm_hw_fence_event *event, char *buf, int len, int max_size, - u32 index) +static void _find_earliest_event(struct hw_fence_driver_data *drv_data, u32 *start_index, + u64 *start_time) +{ + u32 i; + + if (!start_index || !start_time) { + HWFNC_ERR("invalid params start_index:0x%pK start_time:0x%pK\n", start_index, + start_time); + return; + } + + mb(); /* make sure data is ready before read */ + for (i = 0; i < drv_data->total_events; i++) { + u64 time = drv_data->events[i].time; + + if (time && (!*start_time || time < *start_time)) { + *start_time = time; + 
*start_index = i; + } + } +} + +static void _dump_event(enum hw_fence_drv_prio prio, struct msm_hw_fence_event *event, + char *data, u32 index) { - char data[HW_FENCE_MAX_DATA_PER_EVENT_DUMP]; u32 data_cnt; - int i, tmp_len = 0, ret = 0; + int i, len = 0; - if (!event->time) - return 0; + if (!event || !data) { + HWFNC_ERR("invalid params event:0x%pK data:0x%pK\n", event, data); + return; + } - memset(&data, 0, sizeof(data)); + memset(data, 0, sizeof(char) * HW_FENCE_MAX_DATA_PER_EVENT_DUMP); if (event->data_cnt > HW_FENCE_EVENT_MAX_DATA) { HWFNC_ERR("event[%d] has invalid data_cnt:%lu greater than max_data_cnt:%lu\n", index, event->data_cnt, HW_FENCE_EVENT_MAX_DATA); @@ -565,15 +649,29 @@ static inline int _dump_event(struct msm_hw_fence_event *event, char *buf, int l } for (i = 0; i < data_cnt; i++) - tmp_len += scnprintf(data + tmp_len, HW_FENCE_MAX_DATA_PER_EVENT_DUMP - tmp_len, + len += scnprintf(data + len, HW_FENCE_MAX_DATA_PER_EVENT_DUMP - len, "%lx|", event->data[i]); - ret = scnprintf(buf + len, max_size - len, HFENCE_EVT_MSG, index, event->cpu, event->time, - event->data_cnt, data); + HWFNC_DBG_DUMP(prio, HFENCE_EVT_MSG, index, event->cpu, event->time, event->data_cnt, data); +} - HWFNC_DBG_INFO(HFENCE_EVT_MSG, index, event->cpu, event->time, event->data_cnt, data); +void hw_fence_debug_dump_events(enum hw_fence_drv_prio prio, struct hw_fence_driver_data *drv_data) +{ + char data[HW_FENCE_MAX_DATA_PER_EVENT_DUMP]; + u32 start_index; + u64 start_time; + int i; - return ret; + if (!drv_data->events) { + HWFNC_ERR("events not supported\n"); + return; + } + + _find_earliest_event(drv_data, &start_index, &start_time); + for (i = start_index; i < drv_data->total_events && drv_data->events[i].time; i++) + _dump_event(prio, &drv_data->events[i], data, i); + for (i = 0; i < start_index; i++) + _dump_event(prio, &drv_data->events[i], data, i); } /** @@ -626,15 +724,7 @@ static ssize_t hw_fence_dbg_dump_events_rd(struct file *file, char __user *user_ /* find index 
of earliest event */ if (!start_time) { - mb(); /* make sure data is ready before read */ - for (index = 0; index < drv_data->total_events; index++) { - u64 time = drv_data->events[index].time; - - if (time && (!start_time || time < start_time)) { - start_time = time; - start_index = index; - } - } + _find_earliest_event(drv_data, &start_index, &start_time); index = start_index; HWFNC_DBG_H("events:0x%pK start_index:%d start_time:%llu total_events:%d\n", drv_data->events, start_index, start_time, drv_data->total_events); @@ -642,7 +732,15 @@ static ssize_t hw_fence_dbg_dump_events_rd(struct file *file, char __user *user_ HWFNC_DBG_H("++ dump_events index:%d qtime:%llu\n", index, hw_fence_get_qtime(drv_data)); while ((!wraparound || index < start_index) && len < (max_size - entry_size)) { - len += _dump_event(&drv_data->events[index], buf, len, max_size, index); + char data[HW_FENCE_MAX_DATA_PER_EVENT_DUMP]; + + if (drv_data->events[index].time) { + _dump_event(HW_FENCE_INFO, &drv_data->events[index], data, index); + len += scnprintf(buf + len, max_size - len, HFENCE_EVT_MSG, index, + drv_data->events[index].cpu, drv_data->events[index].time, + drv_data->events[index].data_cnt, data); + } + index++; if (index >= drv_data->total_events) { index = 0; @@ -668,6 +766,63 @@ exit: return len; } +static void _dump_queue(enum hw_fence_drv_prio prio, struct msm_hw_fence_client *hw_fence_client, + int queue_type) +{ + struct msm_hw_fence_queue *queue; + struct msm_hw_fence_hfi_queue_header *hfi_header; + struct msm_hw_fence_queue_payload *payload; + u64 timestamp; + u32 *read_ptr, queue_entries; + int i; + + queue = &hw_fence_client->queues[queue_type - 1]; + + if ((queue_type > hw_fence_client->queues_num) || !queue || !queue->va_header + || !queue->va_queue) { + HWFNC_ERR("Cannot dump client:%d q_type:%s q_ptr:0x%pK q_header:0x%pK q_va:0x%pK\n", + hw_fence_client->client_id, + (queue_type == HW_FENCE_TX_QUEUE) ? "TX QUEUE" : "RX QUEUE", + queue, queue ? 
queue->va_header : NULL, queue ? queue->va_queue : NULL); + return; + } + hfi_header = (struct msm_hw_fence_hfi_queue_header *)queue->va_header; + + mb(); /* make sure data is ready before read */ + HWFNC_DBG_DUMP(prio, "%s va:0x%pK rd_idx:%lu wr_idx:%lu tx_wm:%lu q_size_bytes:%lu\n", + (queue_type == HW_FENCE_TX_QUEUE) ? "TX QUEUE" : "RX QUEUE", queue->va_queue, + hfi_header->read_index, hfi_header->write_index, hfi_header->tx_wm, + queue->q_size_bytes); + queue_entries = queue->q_size_bytes / HW_FENCE_CLIENT_QUEUE_PAYLOAD; + + for (i = 0; i < queue_entries; i++) { + read_ptr = ((u32 *)queue->va_queue + + (i * (sizeof(struct msm_hw_fence_queue_payload) / sizeof(u32)))); + payload = (struct msm_hw_fence_queue_payload *)read_ptr; + timestamp = (u64)payload->timestamp_lo | ((u64)payload->timestamp_hi << 32); + + HWFNC_DBG_DUMP(prio, + "%s[%d]: hash:%d ctx:%llu seqno:%llu f:%llu d:%llu err:%u time:%llu\n", + (queue_type == HW_FENCE_TX_QUEUE) ? "tx" : "rx", i, payload->hash, + payload->ctxt_id, payload->seqno, payload->flags, payload->client_data, + payload->error, timestamp); + } +} + +void hw_fence_debug_dump_queues(enum hw_fence_drv_prio prio, + struct msm_hw_fence_client *hw_fence_client) +{ + if (!hw_fence_client) { + HWFNC_ERR("Invalid params client:0x%pK\n", hw_fence_client); + return; + } + + HWFNC_DBG_DUMP(prio, "Queues for client %d\n", hw_fence_client->client_id); + if (hw_fence_client->queues_num == HW_FENCE_CLIENT_QUEUES) + _dump_queue(prio, hw_fence_client, HW_FENCE_RX_QUEUE); + _dump_queue(prio, hw_fence_client, HW_FENCE_TX_QUEUE); +} + /** * hw_fence_dbg_dump_queues_wr() - debugfs wr to dump the hw-fences queues. * @file: file handler. 
@@ -682,12 +837,7 @@ static ssize_t hw_fence_dbg_dump_queues_wr(struct file *file, const char __user size_t count, loff_t *ppos) { struct hw_fence_driver_data *drv_data; - struct msm_hw_fence_queue *rx_queue; - struct msm_hw_fence_queue *tx_queue; - u64 hash, ctx_id, seqno, timestamp, flags, client_data; - u32 *read_ptr, error; - int client_id, i; - struct msm_hw_fence_queue_payload *read_ptr_payload; + int client_id; if (!file || !file->private_data) { HWFNC_ERR("unexpected data %d\n", file); @@ -699,53 +849,11 @@ static ssize_t hw_fence_dbg_dump_queues_wr(struct file *file, const char __user if (client_id < 0) return -EINVAL; - if (!drv_data->clients[client_id] || - IS_ERR_OR_NULL(&drv_data->clients[client_id]->queues[HW_FENCE_RX_QUEUE - 1]) || - IS_ERR_OR_NULL(&drv_data->clients[client_id]->queues[HW_FENCE_TX_QUEUE - 1])) { + if (!drv_data->clients[client_id]) { HWFNC_ERR("client %d not initialized\n", client_id); return -EINVAL; } - - HWFNC_DBG_L("Queues for client %d\n", client_id); - - rx_queue = &drv_data->clients[client_id]->queues[HW_FENCE_RX_QUEUE - 1]; - tx_queue = &drv_data->clients[client_id]->queues[HW_FENCE_TX_QUEUE - 1]; - - HWFNC_DBG_L("-------RX QUEUE------\n"); - for (i = 0; i < drv_data->hw_fence_queue_entries; i++) { - read_ptr = ((u32 *)rx_queue->va_queue + - (i * (sizeof(struct msm_hw_fence_queue_payload) / sizeof(u32)))); - read_ptr_payload = (struct msm_hw_fence_queue_payload *)read_ptr; - - ctx_id = readq_relaxed(&read_ptr_payload->ctxt_id); - seqno = readq_relaxed(&read_ptr_payload->seqno); - hash = readq_relaxed(&read_ptr_payload->hash); - flags = readq_relaxed(&read_ptr_payload->flags); - client_data = readq_relaxed(&read_ptr_payload->client_data); - error = readl_relaxed(&read_ptr_payload->error); - timestamp = (u64)readl_relaxed(&read_ptr_payload->timestamp_lo) | - ((u64)readl_relaxed(&read_ptr_payload->timestamp_hi) << 32); - - HWFNC_DBG_L("rx[%d]: hash:%d ctx:%llu seqno:%llu f:%llu d:%llu err:%u time:%llu\n", - i, hash, ctx_id, 
seqno, flags, client_data, error, timestamp); - } - - HWFNC_DBG_L("-------TX QUEUE------\n"); - for (i = 0; i < drv_data->hw_fence_queue_entries; i++) { - read_ptr = ((u32 *)tx_queue->va_queue + - (i * (sizeof(struct msm_hw_fence_queue_payload) / sizeof(u32)))); - read_ptr_payload = (struct msm_hw_fence_queue_payload *)read_ptr; - - ctx_id = readq_relaxed(&read_ptr_payload->ctxt_id); - seqno = readq_relaxed(&read_ptr_payload->seqno); - hash = readq_relaxed(&read_ptr_payload->hash); - flags = readq_relaxed(&read_ptr_payload->flags); - error = readl_relaxed(&read_ptr_payload->error); - timestamp = (u64)readl_relaxed(&read_ptr_payload->timestamp_lo) | - ((u64)readl_relaxed(&read_ptr_payload->timestamp_hi) << 32); - HWFNC_DBG_L("tx[%d]: hash:%d ctx:%llu seqno:%llu f:%llu err:%u time:%llu\n", - i, hash, ctx_id, seqno, flags, error, timestamp); - } + hw_fence_debug_dump_queues(HW_FENCE_PRINTK, drv_data->clients[client_id]); return count; } diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 82ee33bdaa..f16e74b448 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -476,6 +476,75 @@ int msm_hw_fence_trigger_signal(void *client_handle, } EXPORT_SYMBOL(msm_hw_fence_trigger_signal); +#if IS_ENABLED(CONFIG_DEBUG_FS) +int msm_hw_fence_dump_debug_data(void *client_handle, u32 dump_flags, u32 dump_clients_mask) +{ + struct msm_hw_fence_client *hw_fence_client; + int client_id; + + if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { + HWFNC_ERR("hw fence driver not ready\n"); + return -EAGAIN; + } else if (IS_ERR_OR_NULL(client_handle)) { + HWFNC_ERR("Invalid client handle:%d\n", IS_ERR_OR_NULL(client_handle)); + return -EINVAL; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + + if (dump_flags & MSM_HW_FENCE_DBG_DUMP_QUEUES) { + hw_fence_debug_dump_queues(HW_FENCE_PRINTK, hw_fence_client); + + if (dump_clients_mask) + for (client_id = 0; client_id < HW_FENCE_CLIENT_MAX; 
client_id++) + if ((dump_clients_mask & (1 << client_id)) && + hw_fence_drv_data->clients[client_id]) + hw_fence_debug_dump_queues(HW_FENCE_PRINTK, + hw_fence_drv_data->clients[client_id]); + } + + if (dump_flags & MSM_HW_FENCE_DBG_DUMP_TABLE) + hw_fence_debug_dump_table(HW_FENCE_PRINTK, hw_fence_drv_data); + + if (dump_flags & MSM_HW_FENCE_DBG_DUMP_EVENTS) + hw_fence_debug_dump_events(HW_FENCE_PRINTK, hw_fence_drv_data); + + return 0; +} +EXPORT_SYMBOL(msm_hw_fence_dump_debug_data); + +int msm_hw_fence_dump_fence(void *client_handle, struct dma_fence *fence) +{ + struct msm_hw_fence_client *hw_fence_client; + struct msm_hw_fence *hw_fence; + u64 hash; + + if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { + HWFNC_ERR("hw fence driver not ready\n"); + return -EAGAIN; + } else if (IS_ERR_OR_NULL(client_handle)) { + HWFNC_ERR("Invalid client handle:%d\n", IS_ERR_OR_NULL(client_handle)); + return -EINVAL; + } else if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) { + HWFNC_ERR("DMA Fence is not a HW Fence ctx:%llu seqno:%llu flags:0x%llx\n", + fence->context, fence->seqno, fence->flags); + return -EINVAL; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + + hw_fence = msm_hw_fence_find(hw_fence_drv_data, hw_fence_client, fence->context, + fence->seqno, &hash); + if (!hw_fence) { + HWFNC_ERR("failed to find hw-fence client_id:%d fence:0x%pK ctx:%llu seqno:%llu\n", + hw_fence_client->client_id, fence, fence->context, fence->seqno); + return -EINVAL; + } + hw_fence_debug_dump_fence(HW_FENCE_PRINTK, hw_fence, hash, 0); + + return 0; +} +EXPORT_SYMBOL(msm_hw_fence_dump_fence); +#endif /* CONFIG_DEBUG_FS */ + /* Function used for simulation purposes only. 
*/ int msm_hw_fence_driver_doorbell_sim(u64 db_mask) { From 8b9d1c6da83fa88a568399476ae217138aa0bad6 Mon Sep 17 00:00:00 2001 From: Varsha Suresh Date: Wed, 15 Mar 2023 16:56:03 -0700 Subject: [PATCH 072/166] mm-drivers: Add LOCAL_MODULE_DDK_BUILD argument A parameter is set to enable the build to be executed with DDK framework. Change-Id: Ib98d5a990aa1cfe836d9214111bfef317a4c4fae Signed-off-by: Varsha Suresh --- hw_fence/Android.mk | 1 + msm_ext_display/Android.mk | 1 + sync_fence/Android.mk | 1 + 3 files changed, 3 insertions(+) diff --git a/hw_fence/Android.mk b/hw_fence/Android.mk index bad9f10b96..149702d2d7 100644 --- a/hw_fence/Android.mk +++ b/hw_fence/Android.mk @@ -1,4 +1,5 @@ LOCAL_PATH := $(call my-dir) +LOCAL_MODULE_DDK_BUILD := true include $(CLEAR_VARS) # This makefile is only for DLKM diff --git a/msm_ext_display/Android.mk b/msm_ext_display/Android.mk index 78d659c784..cef996a482 100644 --- a/msm_ext_display/Android.mk +++ b/msm_ext_display/Android.mk @@ -1,4 +1,5 @@ LOCAL_PATH := $(call my-dir) +LOCAL_MODULE_DDK_BUILD := true include $(CLEAR_VARS) # This makefile is only for DLKM diff --git a/sync_fence/Android.mk b/sync_fence/Android.mk index d784b18e9c..f041c70ef4 100644 --- a/sync_fence/Android.mk +++ b/sync_fence/Android.mk @@ -1,4 +1,5 @@ LOCAL_PATH := $(call my-dir) +LOCAL_MODULE_DDK_BUILD := true include $(CLEAR_VARS) # This makefile is only for DLKM From abf0680f4cbe7fc37fa54ddd08ea7cb581bfdc47 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 28 Mar 2023 13:42:17 -0700 Subject: [PATCH 073/166] mm-drivers: hw_fence: add fence error support for clients without rxq Add HW Fence Driver support to notify waiting clients of fence error in HLOS. This is a requirement by clients that do not have Rx Queue. Such clients can register a fence error callback function with data that will be passed back with callback. The fence error callback function is called by HW Fence Driver when: 1. Client registers for a fence already signaled with error. 2. 
Error is signaled for a fence that the client registered to wait on. Change-Id: I2892333838001bed1152118b947cfe12b1a8dd04 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 14 ++- hw_fence/include/hw_fence_drv_utils.h | 15 +++ hw_fence/src/hw_fence_drv_priv.c | 64 ++++++----- hw_fence/src/hw_fence_drv_utils.c | 160 ++++++++++++++++++++++---- hw_fence/src/msm_hw_fence.c | 69 +++++++++++ 5 files changed, 273 insertions(+), 49 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 359347b171..ac002c82d9 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -131,9 +131,12 @@ struct msm_hw_fence_queue { /** * enum payload_type - Enum with the queue payload types. + * HW_FENCE_PAYLOAD_TYPE_1: client queue payload + * HW_FENCE_PAYLOAD_TYPE_2: ctrl queue payload for fence error; client_data stores client_id */ enum payload_type { - HW_FENCE_PAYLOAD_TYPE_1 = 1 + HW_FENCE_PAYLOAD_TYPE_1 = 1, + HW_FENCE_PAYLOAD_TYPE_2 }; /** @@ -144,6 +147,10 @@ enum payload_type { * @mem_descriptor: hfi header memory descriptor * @queues: queues descriptor * @queues_num: number of client queues + * @fence_error_cb: function called for waiting clients that need HLOS notification of fence error + * @fence_error_cb_userdata: opaque pointer registered with fence error callback and passed to + * client during invocation of callback function + * @error_cb_lock: lock to synchronize access to fence error cb and fence error cb data * @ipc_signal_id: id of the signal to be triggered for this client * @ipc_client_vid: virtual id of the ipc client for this hw fence driver client * @ipc_client_pid: physical id of the ipc client for this hw fence driver client @@ -158,6 +165,9 @@ struct msm_hw_fence_client { struct msm_hw_fence_mem_addr mem_descriptor; struct msm_hw_fence_queue queues[HW_FENCE_CLIENT_QUEUES]; int queues_num; + msm_hw_fence_error_cb_t fence_error_cb; + void 
*fence_error_cb_userdata; + struct mutex error_cb_lock; int ipc_signal_id; int ipc_client_vid; int ipc_client_pid; @@ -508,6 +518,8 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data); int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence_queue_payload *payload, int queue_type); +int hw_fence_read_queue_helper(struct msm_hw_fence_queue *queue, + struct msm_hw_fence_queue_payload *payload); int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, struct dma_fence *fence, struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno, u64 *hash, u64 client_data); diff --git a/hw_fence/include/hw_fence_drv_utils.h b/hw_fence/include/hw_fence_drv_utils.h index 43871ee571..29c0f343e8 100644 --- a/hw_fence/include/hw_fence_drv_utils.h +++ b/hw_fence/include/hw_fence_drv_utils.h @@ -122,6 +122,21 @@ int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash, u32 reset_flags); +/** + * hw_fence_utils_fence_error_cb() - Invokes fence error callback registered by specified client + * + * @hw_fence_client: client, for which fence error callback must be invoked + * @ctxt_id: context id of the hw-fence + * @seqno: sequence number of the hw-fence + * @hash: hash of the hw-fence + * @flags: flags of the hw-fence + * @error: error of the hw-fence + * + * Returns zero if success, otherwise returns negative error code + */ +int hw_fence_utils_fence_error_cb(struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, + u64 seqno, u64 hash, u64 flags, u32 error); + /** * hw_fence_utils_get_client_id_priv() - Gets the index into clients struct within hw fence driver * from the client_id used externally diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 8fba461b66..4b71d93498 100644 --- 
a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -187,15 +187,7 @@ char *_get_queue_type(int queue_type) int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence_queue_payload *payload, int queue_type) { - struct msm_hw_fence_hfi_queue_header *hfi_header; struct msm_hw_fence_queue *queue; - u32 read_idx; - u32 write_idx; - u32 to_read_idx; - u32 *read_ptr; - u32 payload_size_u32; - u32 q_size_u32; - struct msm_hw_fence_queue_payload *read_ptr_payload; if (queue_type >= HW_FENCE_CLIENT_QUEUES || !hw_fence_client || !payload) { HWFNC_ERR("Invalid queue type:%s hw_fence_client:0x%pK payload:0x%pK\n", queue_type, @@ -204,6 +196,20 @@ int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, } queue = &hw_fence_client->queues[queue_type]; + HWFNC_DBG_Q("read client:%lu queue:0x%pK\n", hw_fence_client->client_id, queue); + + return hw_fence_read_queue_helper(queue, payload); +} + +int hw_fence_read_queue_helper(struct msm_hw_fence_queue *queue, + struct msm_hw_fence_queue_payload *payload) +{ + struct msm_hw_fence_hfi_queue_header *hfi_header; + u32 read_idx, write_idx, to_read_idx; + u32 *read_ptr; + u32 payload_size_u32, q_size_u32; + struct msm_hw_fence_queue_payload *read_ptr_payload; + hfi_header = queue->va_header; q_size_u32 = (queue->q_size_bytes / sizeof(u32)); @@ -230,13 +236,12 @@ int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, read_idx, write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor); } - HWFNC_DBG_Q("read client:%d rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d queue:0x%pK\n", - hw_fence_client->client_id, &hfi_header->read_index, &hfi_header->write_index, - read_idx, write_idx, queue); + HWFNC_DBG_Q("read rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d queue:0x%pK\n", + &hfi_header->read_index, &hfi_header->write_index, read_idx, write_idx, queue); if (read_idx == write_idx) { HWFNC_DBG_Q("Nothing to read!\n"); - return 0; + return -EINVAL; } /* Move the 
pointer where we need to read and cast it */ @@ -264,12 +269,7 @@ int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, } /* Read the Client Queue */ - payload->ctxt_id = readq_relaxed(&read_ptr_payload->ctxt_id); - payload->seqno = readq_relaxed(&read_ptr_payload->seqno); - payload->hash = readq_relaxed(&read_ptr_payload->hash); - payload->flags = readq_relaxed(&read_ptr_payload->flags); - payload->client_data = readq_relaxed(&read_ptr_payload->client_data); - payload->error = readl_relaxed(&read_ptr_payload->error); + *payload = *read_ptr_payload; /* update the read index */ writel_relaxed(to_read_idx, &hfi_header->read_index); @@ -1204,15 +1204,22 @@ static void _fence_ctl_signal(struct hw_fence_driver_data *drv_data, HWFNC_DBG_H("We must signal the client now! hfence hash:%llu\n", hash); - /* Write to Rx queue */ - if (hw_fence_client->update_rxq) - hw_fence_update_queue(drv_data, hw_fence_client, hw_fence->ctx_id, - hw_fence->seq_id, hash, flags, client_data, error, HW_FENCE_RX_QUEUE - 1); + /* Call fence error callback */ + if (error && hw_fence_client->fence_error_cb) { + hw_fence_utils_fence_error_cb(hw_fence_client, hw_fence->ctx_id, hw_fence->seq_id, + hash, flags, error); + } else { + /* Write to Rx queue */ + if (hw_fence_client->update_rxq) + hw_fence_update_queue(drv_data, hw_fence_client, hw_fence->ctx_id, + hw_fence->seq_id, hash, flags, client_data, error, + HW_FENCE_RX_QUEUE - 1); - /* Signal the hw fence now */ - if (hw_fence_client->send_ipc) - hw_fence_ipcc_trigger_signal(drv_data, tx_client_id, rx_client_id, - hw_fence_client->ipc_signal_id); + /* Signal the hw fence now */ + if (hw_fence_client->send_ipc) + hw_fence_ipcc_trigger_signal(drv_data, tx_client_id, rx_client_id, + hw_fence_client->ipc_signal_id); + } #if IS_ENABLED(CONFIG_DEBUG_FS) if (hw_fence_client->client_id >= HW_FENCE_CLIENT_ID_VAL0 @@ -1355,6 +1362,7 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, /* child fence is already signaled 
*/ GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 1); /* lock */ + join_fence->error |= hw_fence_child->error; if (--join_fence->pending_child_cnt == 0) signal_join_fence = true; @@ -1400,8 +1408,8 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, if (signal_join_fence) { /* signal the join hw fence */ - _fence_ctl_signal(drv_data, hw_fence_client, join_fence, *hash_join_fence, 0, 0, - client_data); + _fence_ctl_signal(drv_data, hw_fence_client, join_fence, *hash_join_fence, 0, + client_data, join_fence->error); set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &array->base.flags); /* diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 9e8629b428..f2b34c1c4f 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -53,11 +53,34 @@ #define HW_FENCE_CLIENT_TYPE_MAX_VPU 32 #define HW_FENCE_CLIENT_TYPE_MAX_IFE 32 -/* - * Each bit in this mask represents each of the loopback clients supported in - * the enum hw_fence_client_id +/** + * HW_FENCE_CTRL_QUEUE_DOORBELL: + * Bit set in doorbell flags mask if hw fence driver should read ctrl rx queue */ -#define HW_FENCE_LOOPBACK_CLIENTS_MASK 0x7fff +#define HW_FENCE_CTRL_QUEUE_DOORBELL 0 + +/** + * HW_FENCE_DOORBELL_FLAGS_ID_LAST: + * Last doorbell flags id for which HW Fence Driver can receive doorbell + */ +#if IS_ENABLED(CONFIG_DEBUG_FS) +#define HW_FENCE_DOORBELL_FLAGS_ID_LAST HW_FENCE_CLIENT_ID_VAL6 +#else +#define HW_FENCE_DOORBELL_FLAGS_ID_LAST HW_FENCE_CTRL_QUEUE_DOORBELL +#endif /* CONFIG_DEBUG_FS */ + +/** + * HW_FENCE_DOORBELL_MASK: + * Each bit in this mask represents possible doorbell flag ids for which hw fence driver can receive + */ +#define HW_FENCE_DOORBELL_MASK \ + GENMASK(HW_FENCE_DOORBELL_FLAGS_ID_LAST, HW_FENCE_CTRL_QUEUE_DOORBELL) + +/** + * HW_FENCE_MAX_ITER_READ: + * Maximum number of iterations when reading queue + */ +#define HW_FENCE_MAX_ITER_READ 100 /** * HW_FENCE_MAX_EVENTS: @@ -179,12 +202,110 @@ void 
global_atomic_store(struct hw_fence_driver_data *drv_data, uint64_t *lock, } } -static int _process_doorbell_client(struct hw_fence_driver_data *drv_data, int client_id) +int hw_fence_utils_fence_error_cb(struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, + u64 seqno, u64 hash, u64 flags, u32 error) +{ + struct msm_hw_fence_cb_data cb_data; + struct dma_fence fence; + int ret = 0; + + if (IS_ERR_OR_NULL(hw_fence_client)) { + HWFNC_ERR("Invalid client:0x%pK\n", hw_fence_client); + return -EINVAL; + } + + mutex_lock(&hw_fence_client->error_cb_lock); + if (!error || !hw_fence_client->fence_error_cb) { + HWFNC_ERR("Invalid error:%d fence_error_cb:0x%pK\n", error, + hw_fence_client->fence_error_cb); + ret = -EINVAL; + goto exit; + } + + /* initialize cb_data info */ + fence.context = ctxt_id; + fence.seqno = seqno; + fence.flags = flags; + fence.error = error; + cb_data.fence = &fence; + cb_data.data = hw_fence_client->fence_error_cb_userdata; + + HWFNC_DBG_L("invoking cb for client:%d ctx:%llu seq:%llu flags:%llu e:%u data:0x%pK\n", + hw_fence_client->client_id, ctxt_id, seqno, flags, error, + hw_fence_client->fence_error_cb_userdata); + + hw_fence_client->fence_error_cb(hash, error, &cb_data); + +exit: + mutex_unlock(&hw_fence_client->error_cb_lock); + + return ret; +} + +static int _process_fence_error_client_loopback(struct hw_fence_driver_data *drv_data, + int db_flag_id) +{ + struct msm_hw_fence_client *hw_fence_client; + struct msm_hw_fence_queue_payload payload; + int i, cb_ret, ret = 0, read = 1; + u32 client_id; + + for (i = 0; read && i < HW_FENCE_MAX_ITER_READ; i++) { + read = hw_fence_read_queue_helper(&drv_data->ctrl_queues[HW_FENCE_RX_QUEUE - 1], + &payload); + if (read < 0) { + HWFNC_DBG_Q("unable to read ctrl rxq for db_flag_id:%d\n", db_flag_id); + return read; + } + if (payload.type != HW_FENCE_PAYLOAD_TYPE_2) { + HWFNC_ERR("unsupported payload type in ctrl rxq received:%u expected:%u\n", + payload.type, HW_FENCE_PAYLOAD_TYPE_2); + ret = 
-EINVAL; + continue; + } + if (payload.client_data < HW_FENCE_CLIENT_ID_CTX0 || + payload.client_data >= drv_data->clients_num) { + HWFNC_ERR("read invalid client_id:%llu from ctrl rxq min:%u max:%u\n", + payload.client_data, HW_FENCE_CLIENT_ID_CTX0, + drv_data->clients_num); + ret = -EINVAL; + continue; + } + + client_id = payload.client_data; + HWFNC_DBG_Q("ctrl rxq rd: it:%d h:%llu ctx:%llu seq:%llu f:%llu e:%u client:%u\n", + i, payload.hash, payload.ctxt_id, payload.seqno, payload.flags, + payload.error, client_id); + + hw_fence_client = drv_data->clients[client_id]; + if (!hw_fence_client) { + HWFNC_ERR("processing fence error cb for unregistered client_id:%u\n", + client_id); + ret = -EINVAL; + continue; + } + + cb_ret = hw_fence_utils_fence_error_cb(hw_fence_client, payload.ctxt_id, + payload.seqno, payload.hash, payload.flags, payload.error); + if (cb_ret) { + HWFNC_ERR("fence_error_cb failed for client:%u ctx:%llu seq:%llu err:%u\n", + client_id, payload.ctxt_id, payload.seqno, payload.error); + ret = cb_ret; + } + } + + return ret; +} + +static int _process_doorbell_id(struct hw_fence_driver_data *drv_data, int db_flag_id) { int ret; - HWFNC_DBG_H("Processing doorbell client_id:%d\n", client_id); - switch (client_id) { + HWFNC_DBG_H("Processing doorbell mask id:%d\n", db_flag_id); + switch (db_flag_id) { + case HW_FENCE_CTRL_QUEUE_DOORBELL: + ret = _process_fence_error_client_loopback(drv_data, db_flag_id); + break; #if IS_ENABLED(CONFIG_DEBUG_FS) case HW_FENCE_CLIENT_ID_VAL0: case HW_FENCE_CLIENT_ID_VAL1: @@ -193,11 +314,11 @@ static int _process_doorbell_client(struct hw_fence_driver_data *drv_data, int c case HW_FENCE_CLIENT_ID_VAL4: case HW_FENCE_CLIENT_ID_VAL5: case HW_FENCE_CLIENT_ID_VAL6: - ret = process_validation_client_loopback(drv_data, client_id); + ret = process_validation_client_loopback(drv_data, db_flag_id); break; #endif /* CONFIG_DEBUG_FS */ default: - HWFNC_ERR("unknown client:%d\n", client_id); + HWFNC_ERR("unknown mask id:%d\n", 
db_flag_id); ret = -EINVAL; } @@ -206,22 +327,21 @@ static int _process_doorbell_client(struct hw_fence_driver_data *drv_data, int c void hw_fence_utils_process_doorbell_mask(struct hw_fence_driver_data *drv_data, u64 db_flags) { - int client_id = HW_FENCE_CLIENT_ID_CTL0; + int db_flag_id = HW_FENCE_CTRL_QUEUE_DOORBELL; u64 mask; - for (; client_id <= HW_FENCE_CLIENT_ID_VAL6; client_id++) { - mask = 1 << client_id; + for (; db_flag_id <= HW_FENCE_DOORBELL_FLAGS_ID_LAST; db_flag_id++) { + mask = 1 << db_flag_id; if (mask & db_flags) { - HWFNC_DBG_H("client_id:%d signaled! flags:0x%llx\n", client_id, db_flags); + HWFNC_DBG_H("db_flag:%d signaled! flags:0x%llx\n", db_flag_id, db_flags); - /* process client */ - if (_process_doorbell_client(drv_data, client_id)) - HWFNC_ERR("Failed to process client:%d\n", client_id); + if (_process_doorbell_id(drv_data, db_flag_id)) + HWFNC_ERR("Failed to process db_flag_id:%d\n", db_flag_id); - /* clear mask for this client and if nothing else pending finish */ + /* clear mask for this flag id if nothing else pending finish */ db_flags = db_flags & ~(mask); - HWFNC_DBG_H("client_id:%d cleared flags:0x%llx mask:0x%llx ~mask:0x%llx\n", - client_id, db_flags, mask, ~(mask)); + HWFNC_DBG_H("db_flag_id:%d cleared flags:0x%llx mask:0x%llx ~mask:0x%llx\n", + db_flag_id, db_flags, mask, ~(mask)); if (!db_flags) break; } @@ -232,7 +352,7 @@ void hw_fence_utils_process_doorbell_mask(struct hw_fence_driver_data *drv_data, static void _hw_fence_cb(int irq, void *data) { struct hw_fence_driver_data *drv_data = (struct hw_fence_driver_data *)data; - gh_dbl_flags_t clear_flags = HW_FENCE_LOOPBACK_CLIENTS_MASK; + gh_dbl_flags_t clear_flags = HW_FENCE_DOORBELL_MASK; int ret; if (!drv_data) diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index f16e74b448..4842ba0cd7 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -118,6 +118,8 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, if 
(ret) goto error; + mutex_init(&hw_fence_client->error_cb_lock); + HWFNC_DBG_INIT("Initialized ptr:0x%p client_id:%d q_num:%d ipc signal:%d vid:%d pid:%d\n", hw_fence_client, hw_fence_client->client_id, hw_fence_client->queues_num, hw_fence_client->ipc_signal_id, hw_fence_client->ipc_client_vid, @@ -476,6 +478,73 @@ int msm_hw_fence_trigger_signal(void *client_handle, } EXPORT_SYMBOL(msm_hw_fence_trigger_signal); +int msm_hw_fence_register_error_cb(void *client_handle, msm_hw_fence_error_cb_t cb, void *data) +{ + struct msm_hw_fence_client *hw_fence_client; + + if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { + HWFNC_ERR("hw fence driver not ready\n"); + return -EAGAIN; + } else if (IS_ERR_OR_NULL(client_handle) || IS_ERR_OR_NULL(cb) || IS_ERR_OR_NULL(data)) { + HWFNC_ERR("Invalid params client:0x%pK cb_func:0x%pK data:0x%pK\n", client_handle, + cb, data); + return -EINVAL; + } + + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + if (hw_fence_client->fence_error_cb) { + HWFNC_ERR("client_id:%d client_id_ext:%d already registered cb_func:%pK data:%pK\n", + hw_fence_client->client_id, hw_fence_client->client_id_ext, + hw_fence_client->fence_error_cb, hw_fence_client->fence_error_cb_userdata); + return -EINVAL; + } + + hw_fence_client->fence_error_cb_userdata = data; + hw_fence_client->fence_error_cb = cb; + + return 0; +} +EXPORT_SYMBOL(msm_hw_fence_register_error_cb); + +int msm_hw_fence_deregister_error_cb(void *client_handle) +{ + struct msm_hw_fence_client *hw_fence_client; + int ret = 0; + + if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { + HWFNC_ERR("hw fence driver not ready\n"); + return -EAGAIN; + } else if (IS_ERR_OR_NULL(client_handle)) { + HWFNC_ERR("Invalid client: 0x%pK\n", client_handle); + return -EINVAL; + } + + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + if (!mutex_trylock(&hw_fence_client->error_cb_lock)) { + HWFNC_ERR("client_id:%d is modifying or 
using fence_error_cb:0x%pK data:0x%pK\n", + hw_fence_client->client_id, hw_fence_client->fence_error_cb, + hw_fence_client->fence_error_cb_userdata); + return -EAGAIN; + } + + if (!hw_fence_client->fence_error_cb) { + HWFNC_ERR("client_id:%d client_id_ext:%d did not register cb:%pK data:%pK\n", + hw_fence_client->client_id, hw_fence_client->client_id_ext, + hw_fence_client->fence_error_cb, hw_fence_client->fence_error_cb_userdata); + ret = -EINVAL; + goto exit; + } + + hw_fence_client->fence_error_cb = NULL; + hw_fence_client->fence_error_cb_userdata = NULL; + +exit: + mutex_unlock(&hw_fence_client->error_cb_lock); + + return 0; +} +EXPORT_SYMBOL(msm_hw_fence_deregister_error_cb); + #if IS_ENABLED(CONFIG_DEBUG_FS) int msm_hw_fence_dump_debug_data(void *client_handle, u32 dump_flags, u32 dump_clients_mask) { From b48c190c8bc41c34dce239c69abda28d1290330a Mon Sep 17 00:00:00 2001 From: Grace An Date: Fri, 16 Dec 2022 17:33:40 -0800 Subject: [PATCH 074/166] mm-drivers: hw_fence: add support for out of order signaling Fence error use case may require that later fence in tx queue be signaled with error before earlier fence. HW Fence Driver provides limited support for this scenario by providing a way to swap the first two entries of client Tx Queue. 
Change-Id: I00faada95a3c33c1dcced79bea5fef3b581152cd Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 2 + hw_fence/src/hw_fence_drv_priv.c | 172 ++++++++++++++++++++++----- hw_fence/src/msm_hw_fence.c | 29 +++++ 3 files changed, 173 insertions(+), 30 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 359347b171..95cf76d5a5 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -505,6 +505,8 @@ int hw_fence_process_fence(struct hw_fence_driver_data *drv_data, int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, u64 seqno, u64 hash, u64 flags, u64 client_data, u32 error, int queue_type); +int hw_fence_update_existing_txq_payload(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 hash, u32 error); inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data); int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence_queue_payload *payload, int queue_type); diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 8fba461b66..1f73494fc5 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -17,6 +17,15 @@ #define IS_HW_FENCE_TX_QUEUE(queue_type) ((queue_type) == HW_FENCE_TX_QUEUE - 1) +#define REQUIRES_IDX_TRANSLATION(queue) \ + ((queue)->rd_wr_idx_factor && ((queue)->rd_wr_idx_start || (queue)->rd_wr_idx_factor > 1)) + +#define IDX_TRANSLATE_CUSTOM_TO_DEFAULT(queue, idx) \ + (((idx) - (queue)->rd_wr_idx_start) * (queue)->rd_wr_idx_factor) + +#define IDX_TRANSLATE_DEFAULT_TO_CUSTOM(queue, idx) \ + (((idx) / (queue)->rd_wr_idx_factor) + (queue)->rd_wr_idx_start) + inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data) { #ifdef HWFENCE_USE_SLEEP_TIMER @@ -184,6 +193,17 @@ char *_get_queue_type(int queue_type) return (queue_type == 
(HW_FENCE_RX_QUEUE - 1)) ? "RXQ" : "TXQ"; } +static void _translate_queue_indexes_custom_to_default(struct msm_hw_fence_queue *queue, + u32 *read_idx, u32 *write_idx) +{ + if (REQUIRES_IDX_TRANSLATION(queue)) { + *read_idx = IDX_TRANSLATE_CUSTOM_TO_DEFAULT(queue, *read_idx); + *write_idx = IDX_TRANSLATE_CUSTOM_TO_DEFAULT(queue, *write_idx); + HWFNC_DBG_Q("rd_idx_u32:%lu wr_idx_u32:%lu rd_wr_idx start:%lu factor:%lu\n", + *read_idx, *write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor); + } +} + int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence_queue_payload *payload, int queue_type) { @@ -223,12 +243,7 @@ int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, write_idx = readl_relaxed(&hfi_header->write_index); /* translate read and write indexes from custom indexing to dwords with no offset */ - if (queue->rd_wr_idx_start || queue->rd_wr_idx_factor != 1) { - read_idx = (read_idx - queue->rd_wr_idx_start) * queue->rd_wr_idx_factor; - write_idx = (write_idx - queue->rd_wr_idx_start) * queue->rd_wr_idx_factor; - HWFNC_DBG_Q("rd_idx_u32:%lu wr_idx_u32:%lu rd_wr_idx start:%lu factor:%lu\n", - read_idx, write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor); - } + _translate_queue_indexes_custom_to_default(queue, &read_idx, &write_idx); HWFNC_DBG_Q("read client:%d rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d queue:0x%pK\n", hw_fence_client->client_id, &hfi_header->read_index, &hfi_header->write_index, @@ -257,8 +272,8 @@ int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, to_read_idx = 0; /* translate to_read_idx to custom indexing with offset */ - if (queue->rd_wr_idx_start || queue->rd_wr_idx_factor != 1) { - to_read_idx = (to_read_idx / queue->rd_wr_idx_factor) + queue->rd_wr_idx_start; + if (REQUIRES_IDX_TRANSLATION(queue)) { + to_read_idx = IDX_TRANSLATE_DEFAULT_TO_CUSTOM(queue, to_read_idx); HWFNC_DBG_Q("translated to_read_idx:%lu rd_wr_idx start:%lu factor:%lu\n", to_read_idx, 
queue->rd_wr_idx_start, queue->rd_wr_idx_factor); } @@ -281,6 +296,34 @@ int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, return to_read_idx == write_idx ? 0 : 1; } +static int _get_update_queue_params(struct msm_hw_fence_queue *queue, + struct msm_hw_fence_hfi_queue_header **hfi_header, u32 *q_size_u32, u32 *payload_size, + u32 *payload_size_u32, u32 **wr_ptr) +{ + if (!queue) { + HWFNC_ERR("invalid queue\n"); + return -EINVAL; + } + + *hfi_header = queue->va_header; + if (!*hfi_header) { + HWFNC_ERR("Invalid queue hfi_header\n"); + return -EINVAL; + } + + *q_size_u32 = (queue->q_size_bytes / sizeof(u32)); + *payload_size = sizeof(struct msm_hw_fence_queue_payload); + *payload_size_u32 = (*payload_size / sizeof(u32)); + + /* if skipping update wr_index, then use hfi_header->tx_wm instead */ + if (queue->skip_wr_idx) + *wr_ptr = &((*hfi_header)->tx_wm); + else + *wr_ptr = &((*hfi_header)->write_index); + + return 0; +} + /* * This function writes to the queue of the client. 
The 'queue_type' determines * if this function is writing to the rx or tx queue @@ -312,23 +355,13 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, } queue = &hw_fence_client->queues[queue_type]; - hfi_header = queue->va_header; - - q_size_u32 = (queue->q_size_bytes / sizeof(u32)); - payload_size = sizeof(struct msm_hw_fence_queue_payload); - payload_size_u32 = (payload_size / sizeof(u32)); - - if (!hfi_header) { - HWFNC_ERR("Invalid queue\n"); + if (_get_update_queue_params(queue, &hfi_header, &q_size_u32, &payload_size, + &payload_size_u32, &wr_ptr)) { + HWFNC_ERR("Invalid client:%d q_type:%d queue\n", hw_fence_client->client_id, + queue_type); return -EINVAL; } - /* if skipping update wr_index, then use hfi_header->tx_wm instead */ - if (queue->skip_wr_idx) - wr_ptr = &hfi_header->tx_wm; - else - wr_ptr = &hfi_header->write_index; - /* * We need to lock the client if there is an Rx Queue update, since that * is the only time when HW Fence driver can have a race condition updating @@ -361,12 +394,7 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, read_idx, write_idx, queue, queue_type, queue->skip_wr_idx ? "true" : "false"); /* translate read and write indexes from custom indexing to dwords with no offset */ - if (queue->rd_wr_idx_start || queue->rd_wr_idx_factor != 1) { - read_idx = (read_idx - queue->rd_wr_idx_start) * queue->rd_wr_idx_factor; - write_idx = (write_idx - queue->rd_wr_idx_start) * queue->rd_wr_idx_factor; - HWFNC_DBG_Q("rd_idx_u32:%lu wr_idx_u32:%lu rd_wr_idx start:%lu factor:%lu\n", - read_idx, write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor); - } + _translate_queue_indexes_custom_to_default(queue, &read_idx, &write_idx); /* Check queue to make sure message will fit */ q_free_u32 = read_idx <= write_idx ? 
(q_size_u32 - (write_idx - read_idx)) : @@ -402,8 +430,8 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, to_write_idx = 0; /* translate to_write_idx to custom indexing with offset */ - if (queue->rd_wr_idx_start || queue->rd_wr_idx_factor != 1) { - to_write_idx = (to_write_idx / queue->rd_wr_idx_factor) + queue->rd_wr_idx_start; + if (REQUIRES_IDX_TRANSLATION(queue)) { + to_write_idx = IDX_TRANSLATE_DEFAULT_TO_CUSTOM(queue, to_write_idx); HWFNC_DBG_Q("translated to_write_idx:%lu rd_wr_idx start:%lu factor:%lu\n", to_write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor); } @@ -438,6 +466,90 @@ exit: return ret; } +int hw_fence_update_existing_txq_payload(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 hash, u32 error) +{ + u32 q_size_u32, payload_size, payload_size_u32, read_idx, write_idx, second_idx, *wr_ptr; + struct msm_hw_fence_queue_payload tmp, *first_payload, *second_payload; + struct msm_hw_fence_hfi_queue_header *hfi_header; + struct msm_hw_fence_queue *queue; + int ret = 0; + + queue = &hw_fence_client->queues[HW_FENCE_TX_QUEUE - 1]; + if (_get_update_queue_params(queue, &hfi_header, &q_size_u32, &payload_size, + &payload_size_u32, &wr_ptr)) { + HWFNC_ERR("Invalid client:%d tx queue\n", hw_fence_client->client_id); + return -EINVAL; + } + + /* Make sure data is ready before read */ + mb(); + + /* Get read and write index */ + read_idx = hfi_header->read_index; + write_idx = *wr_ptr; + + /* translate read and write indexes from custom indexing to dwords with no offset */ + _translate_queue_indexes_custom_to_default(queue, &read_idx, &write_idx); + + if (read_idx == write_idx) { + HWFNC_DBG_Q("Empty queue, no entry matches with hash:%llu\n", hash); + return -EINVAL; + } + + first_payload = (struct msm_hw_fence_queue_payload *)((u32 *)queue->va_queue + read_idx); + HWFNC_DBG_Q("client:%d txq: va=0x%pK pa=0x%pK idx:%d ptr_payload:0x%pK\n", + hw_fence_client->client_id, queue->va_queue, 
queue->pa_queue, read_idx, + first_payload); + + if (first_payload->hash == hash) { + /* Swap not needed, update first payload in client queue with fence error */ + first_payload->error = error; + } else { + /* Check whether second entry matches hash */ + second_idx = read_idx + payload_size_u32; + + /* wrap-around case */ + if (second_idx >= q_size_u32) + second_idx = 0; + + if (second_idx == write_idx) { + HWFNC_ERR("Failed to find matching entry with hash:%llu\n", hash); + return -EINVAL; + } + + second_payload = (struct msm_hw_fence_queue_payload *) + ((u32 *)queue->va_queue + second_idx); + HWFNC_DBG_Q("client:%d txq: va=0x%pK pa=0x%pK idx:%d ptr_payload:0x%pK\n", + hw_fence_client->client_id, queue->va_queue, queue->pa_queue, second_idx, + second_payload); + + if (second_payload->hash != hash) { + HWFNC_ERR("hash:%llu not found in first two queue payloads:%u, %u\n", hash, + read_idx, second_idx); + return -EINVAL; + } + + /* swap first and second payload, updating error field in new first payload */ + tmp = *first_payload; + *first_payload = *second_payload; + first_payload->error = error; + *second_payload = tmp; + + HWFNC_DBG_L("client_id:%d txq move from idx:%u to idx:%u hash:%llu c:%llu s:%llu\n", + hw_fence_client->client_id, read_idx, second_idx, hash, tmp.ctxt_id, + tmp.seqno); + } + + /* update memory for the messages */ + wmb(); + + HWFNC_DBG_L("client_id:%d update tx queue index:%u hash:%llu error:%u\n", + hw_fence_client->client_id, read_idx, hash, error); + + return ret; +} + static int init_global_locks(struct hw_fence_driver_data *drv_data) { struct msm_hw_fence_mem_addr *mem_descriptor; diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 82ee33bdaa..9486fe507d 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -451,6 +451,35 @@ int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 erro } EXPORT_SYMBOL(msm_hw_fence_update_txq); + +int msm_hw_fence_update_txq_error(void 
*client_handle, u64 handle, u32 error, u32 update_flags) +{ + struct msm_hw_fence_client *hw_fence_client; + + if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready || + !hw_fence_drv_data->vm_ready) { + HWFNC_ERR("hw fence driver or vm not ready\n"); + return -EAGAIN; + } else if (IS_ERR_OR_NULL(client_handle) || + (handle >= hw_fence_drv_data->hw_fences_tbl_cnt) || !error) { + HWFNC_ERR("Invalid client_handle:0x%pK or fence handle:%d max:%d or error:%d\n", + client_handle, handle, hw_fence_drv_data->hw_fences_tbl_cnt, error); + return -EINVAL; + } else if (update_flags != MSM_HW_FENCE_UPDATE_ERROR_WITH_MOVE) { + HWFNC_ERR("invalid flags:0x%x expected:0x%x no support of in-place error update\n", + update_flags, MSM_HW_FENCE_UPDATE_ERROR_WITH_MOVE); + return -EINVAL; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + + /* Write to Tx queue */ + hw_fence_update_existing_txq_payload(hw_fence_drv_data, hw_fence_client, + handle, error); + + return 0; +} +EXPORT_SYMBOL(msm_hw_fence_update_txq_error); + /* tx client has to be the physical, rx client virtual id*/ int msm_hw_fence_trigger_signal(void *client_handle, u32 tx_client_pid, u32 rx_client_vid, From 508cc02147fdc14c4fc776b62caf4960b4ca05a1 Mon Sep 17 00:00:00 2001 From: Varsha Suresh Date: Thu, 4 May 2023 15:46:47 -0700 Subject: [PATCH 075/166] mm-driver: Refactor to separate modules into packages Currently, all of the modules are built from a single Bazel package. This complicates integration with the vendor build because the Android build system builds the modules individually. Split the modules into their own Bazel packages to align more closely with the Android build system's expectations and easy to hook bazel build. 
Change-Id: I100e9ec9edbe96212089a5944cbba4d6677ff83a Signed-off-by: Varsha Suresh --- BUILD.bazel | 32 ++----- hw_fence/BUILD.bazel | 16 ++++ hw_fence/Kconfig | 4 + hw_fence/defconfig | 1 + hw_fence/define_hw_fence.bzl | 46 +++++++++ mm_module_build.bzl | 103 --------------------- mm_modules.bzl | 44 --------- msm_ext_display/BUILD.bazel | 10 ++ msm_ext_display/Kconfig | 4 + msm_ext_display/defconfig | 1 + msm_ext_display/define_msm_ext_display.bzl | 31 +++++++ sync_fence/BUILD.bazel | 16 ++++ sync_fence/Kconfig | 4 + sync_fence/defconfig | 1 + sync_fence/define_sync_fence.bzl | 33 +++++++ target.bzl | 16 ---- 16 files changed, 176 insertions(+), 186 deletions(-) create mode 100644 hw_fence/BUILD.bazel create mode 100644 hw_fence/Kconfig create mode 100644 hw_fence/defconfig create mode 100644 hw_fence/define_hw_fence.bzl delete mode 100644 mm_module_build.bzl delete mode 100644 mm_modules.bzl create mode 100644 msm_ext_display/BUILD.bazel create mode 100644 msm_ext_display/Kconfig create mode 100644 msm_ext_display/defconfig create mode 100644 msm_ext_display/define_msm_ext_display.bzl create mode 100644 sync_fence/BUILD.bazel create mode 100644 sync_fence/Kconfig create mode 100644 sync_fence/defconfig create mode 100644 sync_fence/define_sync_fence.bzl delete mode 100644 target.bzl diff --git a/BUILD.bazel b/BUILD.bazel index 77944804ce..5f4185bcfe 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -2,35 +2,21 @@ load("//build/kernel/kleaf:kernel.bzl", "ddk_headers") package( default_visibility = [ - "//visibility:public"], + "//visibility:public", + ], ) ddk_headers( name = "mm_drivers_configs", - hdrs = glob([ - "config/*.h"]), - includes = ["config"] -) - -ddk_headers( - name = "hw_fence_headers", - hdrs = glob([ - "hw_fence/include/*.h"]), - includes = ["hw_fence/include"] -) - -ddk_headers( - name = "sync_fence_uapi_headers", - hdrs = glob([ - "sync_fence/include/uapi/sync_fence/*.h", - "sync_fence/include/*.h"]), - includes = ["sync_fence/include"] + hdrs = 
glob(["config/*.h"]), + includes = ["config"], ) ddk_headers( name = "mm_drivers_headers", - hdrs = [":mm_drivers_configs", ":hw_fence_headers", ":sync_fence_uapi_headers"] + hdrs = [ + ":mm_drivers_configs", + "//vendor/qcom/opensource/mm-drivers/hw_fence:hw_fence_headers", + "//vendor/qcom/opensource/mm-drivers/sync_fence:sync_fence_uapi_headers", + ], ) - -load(":target.bzl", "define_pineapple") -define_pineapple() \ No newline at end of file diff --git a/hw_fence/BUILD.bazel b/hw_fence/BUILD.bazel new file mode 100644 index 0000000000..808c0ec9d3 --- /dev/null +++ b/hw_fence/BUILD.bazel @@ -0,0 +1,16 @@ +load("//build/kernel/kleaf:kernel.bzl", "ddk_headers") +load(":define_hw_fence.bzl", "define_hw_fence") + +package( + default_visibility = [ + "//visibility:public" + ], +) + +ddk_headers( + name = "hw_fence_headers", + hdrs = glob(["include/*.h"]), + includes = ["include"] +) + +define_hw_fence() diff --git a/hw_fence/Kconfig b/hw_fence/Kconfig new file mode 100644 index 0000000000..a50b02eefd --- /dev/null +++ b/hw_fence/Kconfig @@ -0,0 +1,4 @@ +config QTI_HW_FENCE + bool "HW Fence" + help + Enable the hw_fence module \ No newline at end of file diff --git a/hw_fence/defconfig b/hw_fence/defconfig new file mode 100644 index 0000000000..f80d4f65f7 --- /dev/null +++ b/hw_fence/defconfig @@ -0,0 +1 @@ +CONFIG_QTI_HW_FENCE=y diff --git a/hw_fence/define_hw_fence.bzl b/hw_fence/define_hw_fence.bzl new file mode 100644 index 0000000000..1598ed183b --- /dev/null +++ b/hw_fence/define_hw_fence.bzl @@ -0,0 +1,46 @@ +load("//build/kernel/kleaf:kernel.bzl", "ddk_module", "ddk_submodule") +load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir") +load("//msm-kernel:target_variants.bzl", "get_all_variants") + +def _define_module(target, variant): + tv = "{}_{}".format(target, variant) + ddk_module( + name = "{}_msm_hw_fence".format(tv), + srcs = [ + "src/hw_fence_drv_debug.c", + "src/hw_fence_drv_ipc.c", + "src/hw_fence_drv_priv.c", + 
"src/hw_fence_drv_utils.c", + "src/msm_hw_fence.c", + "src/msm_hw_fence_synx_translation.c", + ], + out = "msm_hw_fence.ko", + defconfig = "defconfig", + kconfig = "Kconfig", + conditional_srcs = { + "CONFIG_DEBUG_FS": { + True: ["src/hw_fence_ioctl.c"], + }, + }, + deps = [ + "//msm-kernel:all_headers", + "//vendor/qcom/opensource/synx-kernel:synx_headers", + "//vendor/qcom/opensource/mm-drivers:mm_drivers_headers", + ], + kernel_build = "//msm-kernel:{}".format(tv), + ) + + copy_to_dist_dir( + name = "{}_msm_hw_fence_dist".format(tv), + data = [":{}_msm_hw_fence".format(tv)], + dist_dir = "out/target/product/{}/dlkm/lib/modules".format(target), + flat = True, + wipe_dist_dir = False, + allow_duplicate_filenames = False, + mode_overrides = {"**/*": "644"}, + log = "info", + ) + +def define_hw_fence(): + for (t, v) in get_all_variants(): + _define_module(t, v) diff --git a/mm_module_build.bzl b/mm_module_build.bzl deleted file mode 100644 index dc708705b5..0000000000 --- a/mm_module_build.bzl +++ /dev/null @@ -1,103 +0,0 @@ -load("//build/kernel/kleaf:kernel.bzl", "ddk_module","ddk_submodule") -load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir") -load("//msm-kernel:target_variants.bzl", "get_all_variants") - -def _register_module_to_map(module_map, name, path, config_option, srcs, config_srcs, deps): - processed_config_srcs = {} - - for config_src_name in config_srcs: - config_src = config_srcs[config_src_name] - - if type(config_src) == "list": - processed_config_srcs[config_src_name] = {True: config_src} - else: - processed_config_srcs[config_src_name] = config_src - - module = struct( - name = name, - path = path, - srcs = srcs, - config_srcs = processed_config_srcs, - config_option = config_option, - deps = deps, - ) - - module_map[name] = module - -def _get_config_choices(map, options): - choices = [] - for option in map: - choices.extend(map[option].get(option in options,[])) - return choices - -def _get_kernel_build_options(modules, 
config_options): - all_options = {option: True for option in config_options} - all_options = all_options | {module.config_option: True for module in modules if module.config_option} - return all_options - -def _get_kernel_build_module_srcs(module, options, formatter): - srcs = module.srcs + _get_config_choices(module.config_srcs, options) - print("-",module.name,",",module.config_option,",srcs =",srcs) - module_path = "{}/".format(module.path) if module.path else "" - return ["{}{}".format(module_path, formatter(src)) for src in srcs] - -def _get_kernel_build_module_deps(module, options, formatter): - return [formatter(dep) for dep in module.deps] - -def mm_driver_module_entry(hdrs = []): - module_map = {} - - def register(name, path = None, config_option = None, srcs = [], config_srcs = {}, deps =[]): - _register_module_to_map(module_map, name, path, config_option, srcs, config_srcs, deps) - return struct( - register = register, - get = module_map.get, - hdrs = hdrs, - module_map = module_map - ) - -def define_target_variant_modules(target, variant, registry, modules, config_options = []): - kernel_build = "{}_{}".format(target, variant) - kernel_build_label = "//msm-kernel:{}".format(kernel_build) - modules = [registry.get(module_name) for module_name in modules] - options = _get_kernel_build_options(modules, config_options) - build_print = lambda message : print("{}: {}".format(kernel_build, message)) - formatter = lambda s : s.replace("%b", kernel_build).replace("%t", target) - headers = ["//msm-kernel:all_headers"] + registry.hdrs - all_module_rules = [] - - for module in modules: - rule_name = "{}_{}".format(kernel_build, module.name) - module_srcs = _get_kernel_build_module_srcs(module, options, formatter) - - if not module_srcs: - continue - - ddk_submodule( - name = rule_name, - srcs = module_srcs, - out = "{}.ko".format(module.name), - deps = headers + _get_kernel_build_module_deps(module, options, formatter), - local_defines = options.keys(), - ) - 
all_module_rules.append(rule_name) - - ddk_module( - name = "{}_mm_drivers".format(kernel_build), - kernel_build = kernel_build_label, - deps = all_module_rules, - ) - copy_to_dist_dir( - name = "{}_mm_drivers_dist".format(kernel_build), - data = [":{}_mm_drivers".format(kernel_build)], - dist_dir = "out/target/product/{}/dlkm/lib/modules".format(target), - flat = True, - wipe_dist_dir = False, - allow_duplicate_filenames = False, - mode_overrides = {"**/*": "644"}, - log = "info", - ) - -def define_consolidate_gki_modules(target, registry, modules, config_options = []): - for (targets, variant) in get_all_variants(): - define_target_variant_modules(targets, variant, registry, modules, config_options) \ No newline at end of file diff --git a/mm_modules.bzl b/mm_modules.bzl deleted file mode 100644 index ef8b175e79..0000000000 --- a/mm_modules.bzl +++ /dev/null @@ -1,44 +0,0 @@ -load(":mm_module_build.bzl", "mm_driver_module_entry") - -HW_FENCE_PATH = "hw_fence" -MSM_EXT_DISPLAY_PATH = "msm_ext_display" -SYNC_FENCE_PATH = "sync_fence" - -mm_driver_modules = mm_driver_module_entry([":mm_drivers_headers"]) -module_entry = mm_driver_modules.register - -#--------------- MM-DRIVERS MODULES ------------------ - -module_entry( - name = "hw_fence", - path = HW_FENCE_PATH + "/src", - config_option = "CONFIG_QTI_HW_FENCE", - config_srcs = { - "CONFIG_DEBUG_FS" : [ - "hw_fence_ioctl.c", - ] - }, - srcs = ["hw_fence_drv_debug.c", - "hw_fence_drv_ipc.c", - "hw_fence_drv_priv.c", - "hw_fence_drv_utils.c", - "msm_hw_fence.c", - "msm_hw_fence_synx_translation.c"], - deps =[ - "//vendor/qcom/opensource/synx-kernel:synx_headers" - ] -) - -module_entry( - name = "msm_ext_display", - path = MSM_EXT_DISPLAY_PATH + "/src", - config_option = "CONFIG_MSM_EXT_DISPLAY", - srcs = ["msm_ext_display.c"], -) - -module_entry( - name = "sync_fence", - path = SYNC_FENCE_PATH + "/src", - config_option = "CONFIG_QCOM_SPEC_SYNC", - srcs = ["qcom_sync_file.c"], -) \ No newline at end of file diff --git 
a/msm_ext_display/BUILD.bazel b/msm_ext_display/BUILD.bazel new file mode 100644 index 0000000000..5f30f80c12 --- /dev/null +++ b/msm_ext_display/BUILD.bazel @@ -0,0 +1,10 @@ +load("//build/kernel/kleaf:kernel.bzl", "ddk_headers") +load(":define_msm_ext_display.bzl", "define_msm_ext_display") + +package( + default_visibility = [ + "//visibility:public" + ], +) + +define_msm_ext_display() diff --git a/msm_ext_display/Kconfig b/msm_ext_display/Kconfig new file mode 100644 index 0000000000..a7257e499a --- /dev/null +++ b/msm_ext_display/Kconfig @@ -0,0 +1,4 @@ +config MSM_EXT_DISPLAY + bool "Enable msm_ext_display" + help + Enable msm_ext_display driver diff --git a/msm_ext_display/defconfig b/msm_ext_display/defconfig new file mode 100644 index 0000000000..53017a5990 --- /dev/null +++ b/msm_ext_display/defconfig @@ -0,0 +1 @@ +CONFIG_MSM_EXT_DISPLAY=y diff --git a/msm_ext_display/define_msm_ext_display.bzl b/msm_ext_display/define_msm_ext_display.bzl new file mode 100644 index 0000000000..3287983898 --- /dev/null +++ b/msm_ext_display/define_msm_ext_display.bzl @@ -0,0 +1,31 @@ +load("//build/kernel/kleaf:kernel.bzl", "ddk_module", "ddk_submodule") +load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir") +load("//msm-kernel:target_variants.bzl", "get_all_variants") + +def _define_module(target, variant): + tv = "{}_{}".format(target, variant) + ddk_module( + name = "{}_msm_ext_display".format(tv), + srcs = ["src/msm_ext_display.c"], + out = "msm_ext_display.ko", + defconfig = "defconfig", + kconfig = "Kconfig", + deps = ["//msm-kernel:all_headers", + "//vendor/qcom/opensource/mm-drivers:mm_drivers_headers"], + kernel_build = "//msm-kernel:{}".format(tv), + ) + + copy_to_dist_dir( + name = "{}_msm_ext_display_dist".format(tv), + data = [":{}_msm_ext_display".format(tv)], + dist_dir = "out/target/product/{}/dlkm/lib/modules".format(target), + flat = True, + wipe_dist_dir = False, + allow_duplicate_filenames = False, + mode_overrides = {"**/*": "644"}, + 
log = "info", + ) + +def define_msm_ext_display(): + for (t, v) in get_all_variants(): + _define_module(t, v) diff --git a/sync_fence/BUILD.bazel b/sync_fence/BUILD.bazel new file mode 100644 index 0000000000..8da9507b61 --- /dev/null +++ b/sync_fence/BUILD.bazel @@ -0,0 +1,16 @@ +load("//build/kernel/kleaf:kernel.bzl", "ddk_headers") +load(":define_sync_fence.bzl", "define_sync_fence") + +package( + default_visibility = [ + "//visibility:public" + ], +) + +ddk_headers( + name = "sync_fence_uapi_headers", + hdrs = glob(["include/uapi/sync_fence/*.h"]), + includes = ["include"] +) + +define_sync_fence() diff --git a/sync_fence/Kconfig b/sync_fence/Kconfig new file mode 100644 index 0000000000..6422d5cafa --- /dev/null +++ b/sync_fence/Kconfig @@ -0,0 +1,4 @@ +config QCOM_SPEC_SYNC + bool "Enable spec fence" + help + Enable sync_fence driver \ No newline at end of file diff --git a/sync_fence/defconfig b/sync_fence/defconfig new file mode 100644 index 0000000000..33c414d0f9 --- /dev/null +++ b/sync_fence/defconfig @@ -0,0 +1 @@ +CONFIG_QCOM_SPEC_SYNC=y diff --git a/sync_fence/define_sync_fence.bzl b/sync_fence/define_sync_fence.bzl new file mode 100644 index 0000000000..b7dcf21700 --- /dev/null +++ b/sync_fence/define_sync_fence.bzl @@ -0,0 +1,33 @@ +load("//build/kernel/kleaf:kernel.bzl", "ddk_module") +load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir") +load("//msm-kernel:target_variants.bzl", "get_all_variants") + +def _define_module(target, variant): + tv = "{}_{}".format(target, variant) + ddk_module( + name = "{}_sync_fence".format(tv), + srcs = ["src/qcom_sync_file.c"], + out = "sync_fence.ko", + kconfig = "Kconfig", + defconfig = "defconfig", + deps = [ + "//msm-kernel:all_headers", + "//vendor/qcom/opensource/mm-drivers:mm_drivers_headers", + ], + kernel_build = "//msm-kernel:{}".format(tv), + ) + + copy_to_dist_dir( + name = "{}_sync_fence_dist".format(tv), + data = [":{}_sync_fence".format(tv)], + dist_dir = 
"out/target/product/{}/dlkm/lib/modules".format(target), + flat = True, + wipe_dist_dir = False, + allow_duplicate_filenames = False, + mode_overrides = {"**/*": "644"}, + log = "info", + ) + +def define_sync_fence(): + for (t, v) in get_all_variants(): + _define_module(t, v) diff --git a/target.bzl b/target.bzl deleted file mode 100644 index 6d63bab3c8..0000000000 --- a/target.bzl +++ /dev/null @@ -1,16 +0,0 @@ -load(":mm_modules.bzl", "mm_driver_modules") -load(":mm_module_build.bzl", "define_consolidate_gki_modules") - -def define_pineapple(): - define_consolidate_gki_modules( - target = "pineapple", - registry = mm_driver_modules, - modules = [ - "hw_fence", - "msm_ext_display", - "sync_fence", - ], - config_options = [ - "CONFIG_DEBUG_FS", - ], -) \ No newline at end of file From 72d7c6a3c5706d87e86c3591975058083d895b1b Mon Sep 17 00:00:00 2001 From: Yu Wu Date: Tue, 13 Jun 2023 19:10:30 +0800 Subject: [PATCH 076/166] mm-drivers: hw_fence: avoid dereference before NULL check Fix hw_fence_wait_client dereferenced before NULL check. 
Change-Id: Ib34c4969c9042f4f815b2eca75b553bc23d4b6cc Signed-off-by: Yu Wu --- hw_fence/src/hw_fence_drv_priv.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index e084e61882..52d1159b2f 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1634,14 +1634,17 @@ static void _signal_all_wait_clients(struct hw_fence_driver_data *drv_data, for (wait_client_id = 0; wait_client_id <= drv_data->rxq_clients_num; wait_client_id++) { if (hw_fence->wait_client_mask & BIT(wait_client_id)) { hw_fence_wait_client = drv_data->clients[wait_client_id]; + + if (!hw_fence_wait_client) + continue; + data_id = hw_fence_get_client_data_id(hw_fence_wait_client->client_id_ext); if (data_id < HW_FENCE_MAX_CLIENTS_WITH_DATA) client_data = hw_fence->client_data[data_id]; - if (hw_fence_wait_client) - _fence_ctl_signal(drv_data, hw_fence_wait_client, hw_fence, - hash, 0, client_data, error); + _fence_ctl_signal(drv_data, hw_fence_wait_client, hw_fence, + hash, 0, client_data, error); } } } From 129a3797e8dd1163aa49809b26355c8bb0573584 Mon Sep 17 00:00:00 2001 From: Grace An Date: Fri, 7 Apr 2023 13:18:37 -0700 Subject: [PATCH 077/166] mm-drivers: hw_fence: add guard to avoid redefinition of synx client IDs On some targets, the synx api defines synx hwfence client IDs. Add guard to prevent redefinitions. 
Change-Id: If947aa39fa15756c7845613d666dbea84adc3a4b Signed-off-by: Grace An --- hw_fence/include/msm_hw_fence_synx_translation.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/hw_fence/include/msm_hw_fence_synx_translation.h b/hw_fence/include/msm_hw_fence_synx_translation.h index 1235d7639e..b1724b588b 100644 --- a/hw_fence/include/msm_hw_fence_synx_translation.h +++ b/hw_fence/include/msm_hw_fence_synx_translation.h @@ -8,12 +8,13 @@ #include +extern bool hw_fence_driver_enable; + +#ifndef SYNX_HW_FENCE_CLIENT_START #define SYNX_HW_FENCE_CLIENT_START 1024 #define SYNX_HW_FENCE_CLIENT_END 4096 #define SYNX_MAX_SIGNAL_PER_CLIENT 64 -extern bool hw_fence_driver_enable; - /** * enum synx_client_id : Unique identifier of the supported clients * @@ -80,6 +81,7 @@ enum synx_hwfence_client_id { SYNX_MAX_SIGNAL_PER_CLIENT, SYNX_CLIENT_HW_FENCE_MAX = SYNX_HW_FENCE_CLIENT_END, }; +#endif #if IS_ENABLED(CONFIG_QTI_HW_FENCE) /** From 32086fbaa0789f8da4500ef1d0c72169d68fd98e Mon Sep 17 00:00:00 2001 From: Abhijith Desai Date: Fri, 25 Aug 2023 18:39:15 +0530 Subject: [PATCH 078/166] mm-drivers: hw_fence: add guard to avoid redefinition of synx client IDs On some targets, the synx api defines synx hwfence client IDs. Add guard to prevent redefinitions. 
Change-Id: If947aa39fa15756c7845613d666dbea84adc3a4b Signed-off-by: Grace An Signed-off-by: Abhijith Desai --- hw_fence/include/msm_hw_fence_synx_translation.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/hw_fence/include/msm_hw_fence_synx_translation.h b/hw_fence/include/msm_hw_fence_synx_translation.h index 1235d7639e..b1724b588b 100644 --- a/hw_fence/include/msm_hw_fence_synx_translation.h +++ b/hw_fence/include/msm_hw_fence_synx_translation.h @@ -8,12 +8,13 @@ #include +extern bool hw_fence_driver_enable; + +#ifndef SYNX_HW_FENCE_CLIENT_START #define SYNX_HW_FENCE_CLIENT_START 1024 #define SYNX_HW_FENCE_CLIENT_END 4096 #define SYNX_MAX_SIGNAL_PER_CLIENT 64 -extern bool hw_fence_driver_enable; - /** * enum synx_client_id : Unique identifier of the supported clients * @@ -80,6 +81,7 @@ enum synx_hwfence_client_id { SYNX_MAX_SIGNAL_PER_CLIENT, SYNX_CLIENT_HW_FENCE_MAX = SYNX_HW_FENCE_CLIENT_END, }; +#endif #if IS_ENABLED(CONFIG_QTI_HW_FENCE) /** From 6b82d73e1de6b7e5fe38811ca2203bfcd835f49a Mon Sep 17 00:00:00 2001 From: Abhijith Desai Date: Fri, 25 Aug 2023 18:39:58 +0530 Subject: [PATCH 079/166] mm-drivers: hw_fence: fix hw fence driver header path Modify hw-fence driver header path to compile from display si. 
Change-Id: I58e5aa3cdce430be0cc5488b8a0cd6c2d68a9fc5 Signed-off-by: Ingrid Gallardo Signed-off-by: Abhijith Desai --- hw_fence/include/hw_fence_drv_priv.h | 2 +- hw_fence/src/msm_hw_fence_synx_translation.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index fb4caacf43..f3d088065c 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -10,9 +10,9 @@ #include #include #include -#include #include #include +#include "msm_hw_fence.h" /* max u64 to indicate invalid fence */ #define HW_FENCE_INVALID_PARENT_FENCE (~0ULL) diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c index 6970eb4d60..ca109646fa 100644 --- a/hw_fence/src/msm_hw_fence_synx_translation.c +++ b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -5,7 +5,7 @@ #include #include -#include +#include "msm_hw_fence.h" #include "msm_hw_fence_synx_translation.h" #include "hw_fence_drv_priv.h" #include "hw_fence_drv_debug.h" From 9df969e3f9e67fab01769c68557d3e3297acc253 Mon Sep 17 00:00:00 2001 From: Abhijith Desai Date: Fri, 25 Aug 2023 18:41:33 +0530 Subject: [PATCH 080/166] mm-drivers: hw_fence: add snapshot of hw-fence driver header in Display SI Add snapshot of the hw fence driver header as of qcom-6.4 commit 76b6fe6f907d ("defconfig: Enable PM8008 regulator driver for BLAIR") into Display SI. 
Change-Id: Id57863a2ecbb043ae953adbda1b55630872e2b8f Signed-off-by: Ingrid Gallardo Signed-off-by: Abhijith Desai --- hw_fence/include/msm_hw_fence.h | 641 ++++++++++++++++++++++++++++++++ 1 file changed, 641 insertions(+) create mode 100644 hw_fence/include/msm_hw_fence.h diff --git a/hw_fence/include/msm_hw_fence.h b/hw_fence/include/msm_hw_fence.h new file mode 100644 index 0000000000..62c0f3ba85 --- /dev/null +++ b/hw_fence/include/msm_hw_fence.h @@ -0,0 +1,641 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __MSM_HW_FENCE_H +#define __MSM_HW_FENCE_H + +#include +#include + +/** + * MSM_HW_FENCE_FLAG_ENABLED_BIT - Hw-fence is enabled for the dma_fence. + * + * Drivers set this flag in the dma_fence 'flags' to fences that + * are backed up by a hw-fence. + */ +#define MSM_HW_FENCE_FLAG_ENABLED_BIT 31 + +/** + * MSM_HW_FENCE_FLAG_SIGNALED_BIT - Hw-fence is signaled for the dma_fence. + * + * This flag is set by hw-fence driver when a client wants to add itself as + * a waiter for this hw-fence. The client uses this flag to avoid adding itself + * as a waiter for a fence that is already retired. + */ +#define MSM_HW_FENCE_FLAG_SIGNALED_BIT 30 + +/** + * MSM_HW_FENCE_ERROR_RESET - Hw-fence flagged as error due to forced reset from producer. + */ +#define MSM_HW_FENCE_ERROR_RESET BIT(0) + +/** + * MSM_HW_FENCE_RESET_WITHOUT_ERROR: Resets client and its hw-fences, signaling them without error. + * MSM_HW_FENCE_RESET_WITHOUT_DESTROY: Resets client and its hw-fences, signaling without + * destroying the fences. + */ +#define MSM_HW_FENCE_RESET_WITHOUT_ERROR BIT(0) +#define MSM_HW_FENCE_RESET_WITHOUT_DESTROY BIT(1) + +/** + * MSM_HW_FENCE_UPDATE_ERROR_WITH_MOVE: Updates client tx queue error by moving fence with error to + * beginning of queue. 
+ */ +#define MSM_HW_FENCE_UPDATE_ERROR_WITH_MOVE BIT(0) + +/** + * MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - Maximum number of signals per client + */ +#define MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT 64 + +/** + * MSM_HW_FENCE_DBG_DUMP_QUEUES: Dumps queues information + * MSM_HW_FENCE_DBG_DUMP_TABLE: Dumps hwfence table + * MSM_HW_FENCE_DBG_DUMP_EVENTS: Dumps hwfence ctl events + */ +#define MSM_HW_FENCE_DBG_DUMP_QUEUES BIT(0) +#define MSM_HW_FENCE_DBG_DUMP_TABLE BIT(1) +#define MSM_HW_FENCE_DBG_DUMP_EVENTS BIT(2) + +/** + * struct msm_hw_fence_create_params - Creation parameters. + * + * @name : Optional parameter associating a name with the object for debug purposes. + * Only first 64 bytes are accepted, rest will be ignored. + * @handle : Pointer to fence handle (filled by function). + * @fence : Pointer to fence. + * @flags : flags for customization. + */ +struct msm_hw_fence_create_params { + const char *name; + u64 *handle; + void *fence; + u32 flags; +}; + +/** + * struct msm_hw_fence_hfi_queue_table_header - HFI queue table structure. + * @version: HFI protocol version. + * @size: Queue table size in dwords. + * @qhdr0_offset: First queue header offset (dwords) in this table. + * @qhdr_size: Queue header size. + * @num_q: Number of queues defined in this table. + * @num_active_q: Number of active queues. + */ +struct msm_hw_fence_hfi_queue_table_header { + u32 version; + u32 size; + u32 qhdr0_offset; + u32 qhdr_size; + u32 num_q; + u32 num_active_q; +}; + +/** + * struct msm_hw_fence_hfi_queue_header - HFI queue header structure. + * @status: Active = 1, Inactive = 0. + * @start_addr: Starting address of the queue. + * @type: Queue type (rx/tx). + * @queue_size: Size of the queue. + * @pkt_size: Size of the queue packet entries, + * 0 - means variable size of message in the queue, + * non-zero - size of the packet, fixed. + * @pkt_drop_cnt: Number of packets drop by sender. + * @rx_wm: Receiver watermark, applicable in event driven mode. 
+ * @tx_wm: Sender watermark, applicable in event driven mode. + * @rx_req: Receiver sets this bit if queue is empty. + * @tx_req: Sender sets this bit if queue is full. + * @rx_irq_status: Receiver sets this bit and triggers an interrupt to the + * sender after packets are dequeued. Sender clears this bit. + * @tx_irq_status: Sender sets this bit and triggers an interrupt to the + * receiver after packets are queued. Receiver clears this bit. + * @read_index: read index of the queue. + * @write_index: write index of the queue. + */ +struct msm_hw_fence_hfi_queue_header { + u32 status; + u32 start_addr; + u32 type; + u32 queue_size; + u32 pkt_size; + u32 pkt_drop_cnt; + u32 rx_wm; + u32 tx_wm; + u32 rx_req; + u32 tx_req; + u32 rx_irq_status; + u32 tx_irq_status; + u32 read_index; + u32 write_index; +}; + +/** + * struct msm_hw_fence_mem_addr - Memory descriptor of the queue allocated by + * the fence driver for each client during + * register. + * @virtual_addr: Kernel virtual address of the queue. + * @device_addr: Physical address of the memory object. + * @size: Size of the memory. + * @mem_data: Internal pointer with the attributes of the allocation. + */ +struct msm_hw_fence_mem_addr { + void *virtual_addr; + phys_addr_t device_addr; + u64 size; + void *mem_data; +}; + +/** + * struct msm_hw_fence_cb_data - Data passed back in fence error callback. + * @data: data registered with callback + * @fence: fence signaled with error + */ +struct msm_hw_fence_cb_data { + void *data; + struct dma_fence *fence; +}; + +/** + * msm_hw_fence_error_cb: Callback function registered by waiting clients. + * Dispatched when client is waiting on a fence + * signaled with error. 
+ * + * @handle: handle of fence signaled with error + * @error: error signed for fence + * @cb_data: pointer to struct containing opaque pointer registered with callback + * and fence information + */ +typedef void (*msm_hw_fence_error_cb_t)(u32 handle, int error, void *cb_data); + +/** + * enum hw_fence_client_id - Unique identifier of the supported clients. + * @HW_FENCE_CLIENT_ID_CTX0: GFX Client. + * @HW_FENCE_CLIENT_ID_CTL0: DPU Client 0. + * @HW_FENCE_CLIENT_ID_CTL1: DPU Client 1. + * @HW_FENCE_CLIENT_ID_CTL2: DPU Client 2. + * @HW_FENCE_CLIENT_ID_CTL3: DPU Client 3. + * @HW_FENCE_CLIENT_ID_CTL4: DPU Client 4. + * @HW_FENCE_CLIENT_ID_CTL5: DPU Client 5. + * @HW_FENCE_CLIENT_ID_VAL0: debug Validation client 0. + * @HW_FENCE_CLIENT_ID_VAL1: debug Validation client 1. + * @HW_FENCE_CLIENT_ID_VAL2: debug Validation client 2. + * @HW_FENCE_CLIENT_ID_VAL3: debug Validation client 3. + * @HW_FENCE_CLIENT_ID_VAL4: debug Validation client 4. + * @HW_FENCE_CLIENT_ID_VAL5: debug Validation client 5. + * @HW_FENCE_CLIENT_ID_VAL6: debug Validation client 6. + * @HW_FENCE_CLIENT_ID_IPE: IPE Client. + * @HW_FENCE_CLIENT_ID_VPU: VPU Client. + * @HW_FENCE_CLIENT_ID_IFE0: IFE0 Client 0. + * @HW_FENCE_CLIENT_ID_IFE1: IFE1 Client 0. + * @HW_FENCE_CLIENT_ID_IFE2: IFE2 Client 0. + * @HW_FENCE_CLIENT_ID_IFE3: IFE3 Client 0. + * @HW_FENCE_CLIENT_ID_IFE4: IFE4 Client 0. + * @HW_FENCE_CLIENT_ID_IFE5: IFE5 Client 0. + * @HW_FENCE_CLIENT_ID_IFE6: IFE6 Client 0. + * @HW_FENCE_CLIENT_ID_IFE7: IFE7 Client 0. + * @HW_FENCE_CLIENT_MAX: Max number of clients, any client must be added + * before this enum. 
+ */ +enum hw_fence_client_id { + HW_FENCE_CLIENT_ID_CTX0 = 0x1, + HW_FENCE_CLIENT_ID_CTL0, + HW_FENCE_CLIENT_ID_CTL1, + HW_FENCE_CLIENT_ID_CTL2, + HW_FENCE_CLIENT_ID_CTL3, + HW_FENCE_CLIENT_ID_CTL4, + HW_FENCE_CLIENT_ID_CTL5, + HW_FENCE_CLIENT_ID_VAL0, + HW_FENCE_CLIENT_ID_VAL1, + HW_FENCE_CLIENT_ID_VAL2, + HW_FENCE_CLIENT_ID_VAL3, + HW_FENCE_CLIENT_ID_VAL4, + HW_FENCE_CLIENT_ID_VAL5, + HW_FENCE_CLIENT_ID_VAL6, + HW_FENCE_CLIENT_ID_IPE, + HW_FENCE_CLIENT_ID_VPU = HW_FENCE_CLIENT_ID_IPE + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE0 = HW_FENCE_CLIENT_ID_VPU + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE1 = HW_FENCE_CLIENT_ID_IFE0 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE2 = HW_FENCE_CLIENT_ID_IFE1 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE3 = HW_FENCE_CLIENT_ID_IFE2 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE4 = HW_FENCE_CLIENT_ID_IFE3 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE5 = HW_FENCE_CLIENT_ID_IFE4 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE6 = HW_FENCE_CLIENT_ID_IFE5 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE7 = HW_FENCE_CLIENT_ID_IFE6 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_MAX = HW_FENCE_CLIENT_ID_IFE7 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT +}; + +#if IS_ENABLED(CONFIG_QTI_HW_FENCE) +/** + * msm_hw_fence_register() - Registers a client with the HW Fence Driver. + * @client_id: ID of the client that is being registered. + * @mem_descriptor: Pointer to fill the memory descriptor. Fence + * controller driver fills this pointer with the + * memory descriptor for the rx/tx queues. + * + * This call initializes any shared memory region for the tables/queues + * required for the HW Fence Driver to communicate with Fence Controller + * for this client_id and fills the memory descriptor for the queues + * that the client hw cores need to manage. 
+ * + * Return: Handle to the client object that must be used for further calls + * to the fence controller driver or NULL in case of error. + * + * The returned handle is used internally by the fence controller driver + * in further calls to identify the client and access any resources + * allocated for this client. + */ +void *msm_hw_fence_register( + enum hw_fence_client_id client_id, + struct msm_hw_fence_mem_addr *mem_descriptor); + +/** + * msm_hw_fence_deregister() - Deregisters a client that was previously + * registered with the HW Fence Driver. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_deregister(void *client_handle); + +/** + * msm_hw_fence_create() - Creates a new hw fence. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @params: Hw fence creation parameters containing dma fence + * to create its associated hw-fence. + * + * This call creates the hw fence and registers it with the fence + * controller. After the creation of this fence, it is a Client Driver + * responsibility to 'destroy' this fence to prevent any leakage of + * hw-fence resources. + * To destroy a fence, 'msm_hw_fence_destroy' must be called, once the + * fence is not required anymore, which is when all the references to + * the dma-fence are released. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_create(void *client_handle, + struct msm_hw_fence_create_params *params); + +/** + * msm_hw_fence_destroy() - Destroys a hw fence. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @fence: Sw dma-fence to destroy its associated hw-fence. 
+ * + * The fence destroyed by this function, is a fence that must have been + * created by the hw fence driver through 'msm_hw_fence_create' call. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_destroy(void *client_handle, + struct dma_fence *fence); + +/** + * msm_hw_fence_destroy_with_handle() - Destroys a hw fence through its handle. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @handle: handle for hw-fence to destroy + * + * The fence destroyed by this function, is a fence that must have been + * created by the hw fence driver through 'msm_hw_fence_create' call. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_destroy_with_handle(void *client_handle, u64 handle); + +/** + * msm_hw_fence_wait_update_v2() - Register or unregister the Client with the + * Fence Controller as a waiting-client of the + * list of fences received as parameter. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @fences: Pointer to an array of pointers containing the fences to + * 'wait-on' for this client. If a 'fence-array' fence is passed, + * driver will iterate through the individual 'fences' which are + * part of the 'fence-array' and will register to wait-for-all the + * individual fences of the fence-array. + * A 'fence-array' passed as parameter can only have 'individual' + * fences and cannot have another nested 'fence-array', + * otherwise this API will return failure. + * Also, all the 'fences' in this list must have a corresponding + * hw-fence that was registered by the producer of the fence, + * otherwise, this API will return failure. + * @handles: Optional pointer to an array of handles of 'fences'. + * If non-null, these handles are filled by the function. 
+ * This list must have the same size as 'fences' if present. + * @client_data_list: Optional pointer to an array of u64 client_data + * values for each fence in 'fences'. + * If non-null, this list must have the same size as + * the 'fences' list. This client registers each fence + * with the client_data value at the same index so that + * this value is returned to the client upon signaling + * of the fence. + * If a null pointer is provided, a default value of + * zero is registered as the client_data of each fence. + * @num_fences: Number of elements in the 'fences' list (and 'handles' and + * 'client_data_list' if either or both are present). + * @reg: Boolean to indicate if register or unregister for waiting on + * the hw-fence. + * + * If the 'register' boolean is set as true, this API will register with + * the Fence Controller the Client as a consumer (i.e. 'wait-client') of + * the fences received as parameter. + * Function will return immediately after the client was registered + * (i.e this function does not wait for the fences to be signaled). + * When any of the Fences received as parameter is signaled (or all the + * fences in case of a fence-array), Fence controller will trigger the hw + * signal to notify the Client hw-core about the signaled fence (or fences + * in case of a fence array). i.e. signalization of the hw fence it is a + * hw to hw communication between Fence Controller and the Client hw-core, + * and this API is only the interface to allow the Client Driver to + * register its Client hw-core for the hw-to-hw notification. + * If the 'register' boolean is set as false, this API will unregister + * with the Fence Controller the Client as a consumer, this is used for + * cases where a Timeout waiting for a fence occurs and client drivers want + * to unregister for signal. 
+ * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_wait_update_v2(void *client_handle, + struct dma_fence **fences, u64 *handles, u64 *client_data_list, u32 num_fences, bool reg); + +/** + * msm_hw_fence_wait_update() - Register or unregister the Client with the + * Fence Controller as a waiting-client of the + * list of fences received as parameter. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @fences: Pointer to an array of pointers containing the fences to + * 'wait-on' for this client. If a 'fence-array' fence is passed, + * driver will iterate through the individual 'fences' which are + * part of the 'fence-array' and will register to wait-for-all the + * individual fences of the fence-array. + * A 'fence-array' passed as parameter can only have 'individual' + * fences and cannot have another nested 'fence-array', + * otherwise this API will return failure. + * Also, all the 'fences' in this list must have a corresponding + * hw-fence that was registered by the producer of the fence, + * otherwise, this API will return failure. + * @num_fences: Number of elements in the 'fences' list. + * @reg: Boolean to indicate if register or unregister for waiting on + * the hw-fence. + * + * If the 'register' boolean is set as true, this API will register with + * the Fence Controller the Client as a consumer (i.e. 'wait-client') of + * the fences received as parameter. + * Function will return immediately after the client was registered + * (i.e this function does not wait for the fences to be signaled). + * When any of the Fences received as parameter is signaled (or all the + * fences in case of a fence-array), Fence controller will trigger the hw + * signal to notify the Client hw-core about the signaled fence (or fences + * in case of a fence array). i.e. 
signalization of the hw fence it is a + * hw to hw communication between Fence Controller and the Client hw-core, + * and this API is only the interface to allow the Client Driver to + * register its Client hw-core for the hw-to-hw notification. + * If the 'register' boolean is set as false, this API will unregister + * with the Fence Controller the Client as a consumer, this is used for + * cases where a Timeout waiting for a fence occurs and client drivers want + * to unregister for signal. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_wait_update(void *client_handle, + struct dma_fence **fences, u32 num_fences, bool reg); + +/** + * msm_hw_fence_reset_client() - Resets the HW Fence Client. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @reset_flags: Flags to choose the reset type. See MSM_HW_FENCE_RESET_* + * definitions. + * + * This function iterates through the HW Fences and removes the client + * from the waiting-client mask in any of the HW Fences and signal the + * fences owned by that client. + * This function should only be called by clients upon error, when clients + * did a HW reset, to make sure any HW Fence where the client was register + * for wait are removed, and any Fence owned by the client are signaled. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags); + +/** + * msm_hw_fence_reset_client_by_id() - Resets the HW Fence Client through + * its id. + * @client_id: id of client to reset + * @reset_flags: Flags to choose the reset type. See MSM_HW_FENCE_RESET_* + * definitions. + * + * This function iterates through the HW Fences and removes the client + * from the waiting-client mask in any of the HW Fences and signal the + * fences owned by that client. 
+ * This function should only be called by clients upon error, when clients + * did a HW reset, to make sure any HW Fence where the client was register + * for wait are removed, and any Fence owned by the client are signaled. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_reset_client_by_id(enum hw_fence_client_id client_id, u32 reset_flags); + +/** + * msm_hw_fence_update_txq() - Updates Client Tx Queue with the Fence info. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @handle: handle for fence to update in the Tx Queue. + * @flags: flags to set in the queue for the fence. + * @error: error to set in the queue for the fence. + * + * This function should only be used by clients that cannot have the Tx Queue + * updated by the Firmware or the HW Core. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 error); + +/** + * msm_hw_fence_update_txq_error() - Updates error field for fence already in Tx Queue. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @handle: handle for existing fence in Tx Queue to update. + * @error: error to set in the queue for the fence. + * @update_flags: flags to choose the update type. See MSM_HW_FENCE_UPDATE_ERROR_* + * definitions. + * + * This function should only be used by clients that cannot have the Tx Queue + * updated by the Firmware or the HW Core. 
+ * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_update_txq_error(void *client_handle, u64 handle, u32 error, u32 update_flags); + +/** + * msm_hw_fence_trigger_signal() - Triggers signal for the tx/rx signal pair + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @ tx_client_id: id of the client triggering the signal. + * @ rx_client_id: id of the client receiving the signal. + * @ signal_id: id of the signal to trigger + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_trigger_signal(void *client_handle, u32 tx_client_id, u32 rx_client_id, + u32 signal_id); + +/** + * msm_hw_fence_register_error_cb() - Register callback to be dispatched when + * HW Fence Client is waiting for a fence + * that is signaled with error. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @cb: pointer to callback function to be invoked + * @data: opaque pointer passed back with callback + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_register_error_cb(void *client_handle, msm_hw_fence_error_cb_t cb, void *data); + +/** + * msm_hw_fence_deregister_error_cb() - Deregister callback to be dispatched when + * HW Fence Client is waiting for a fence + * that is signaled with error. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. 
+ * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_deregister_error_cb(void *client_handle); + +#else +static inline void *msm_hw_fence_register(enum hw_fence_client_id client_id, + struct msm_hw_fence_mem_addr *mem_descriptor) +{ + return NULL; +} + +static inline int msm_hw_fence_deregister(void *client_handle) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_create(void *client_handle, + struct msm_hw_fence_create_params *params) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_destroy(void *client_handle, struct dma_fence *fence) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_destroy_with_handle(void *client_handle, u64 handle) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_wait_update_v2(void *client_handle, + struct dma_fence **fences, u64 *handles, u64 *client_data_list, u32 num_fences, bool reg) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_wait_update(void *client_handle, + struct dma_fence **fences, u32 num_fences, bool reg) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_reset_client_by_id(enum hw_fence_client_id client_id, + u32 reset_flags) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 error) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_update_txq_error(void *client_handle, u64 handle, u32 error, + u32 update_flags) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_trigger_signal(void *client_handle, u32 tx_client_id, + u32 rx_client_id, u32 signal_id) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_register_error_cb(void *client_handle, msm_hw_fence_error_cb_t cb, + void *data) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_deregister_error_cb(void *client_handle) +{ + return -EINVAL; +} +#endif + +#if IS_ENABLED(CONFIG_DEBUG_FS) && 
IS_ENABLED(CONFIG_QTI_HW_FENCE) +/** + * msm_hw_fence_dump_debug_data() - Dumps debug data information + * @client_handle: Hw fence driver client handle returned during 'msm_hw_fence_register'. + * @dump_flags: Flags to indicate which info to dump, see MSM_HW_FENCE_DBG_DUMP_** flags. + * @dump_clients_mask: Optional bitmask to indicate along with the caller of the api, which other + * clients to dump data from. E.g. a client like display might want to dump + * info of any all other clients from which it can receive fences, like gfx. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_dump_debug_data(void *client_handle, u32 dump_flags, u32 dump_clients_mask); + +/** + * msm_hw_fence_dump_debug_data() - Dumps hw-fence information for dma-fence + * @client_handle: Hw fence driver client handle returned during 'msm_hw_fence_register'. + * @fence: dma_fence to dump hw-fence information + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_dump_fence(void *client_handle, struct dma_fence *fence); + +#else +static inline int msm_hw_fence_dump_debug_data(void *client_handle, u32 dump_flags, + u32 dump_clients_mask) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_dump_fence(void *client_handle, struct dma_fence *fence) +{ + return -EINVAL; +} +#endif + +#endif From ebb15a1d23590903f1a968e82dee42a63d51e084 Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 4 Oct 2023 21:15:36 -0700 Subject: [PATCH 081/166] mm-drivers: hw_fence: resolve compilation failures for hw fencing Resolve compilation issues in hw-fence module. 
Change-Id: I50113a00db3c99139d72ba8a12ee765e97c797ec Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_debug.c | 61 +++++++------- hw_fence/src/hw_fence_drv_ipc.c | 8 +- hw_fence/src/hw_fence_drv_priv.c | 84 ++++++++++---------- hw_fence/src/hw_fence_drv_utils.c | 82 ++++++++++--------- hw_fence/src/hw_fence_ioctl.c | 8 +- hw_fence/src/msm_hw_fence.c | 12 +-- hw_fence/src/msm_hw_fence_synx_translation.c | 2 +- 7 files changed, 134 insertions(+), 123 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index 405d0c0681..31e96318d6 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -15,7 +15,7 @@ #define HW_FENCE_DEBUG_MAX_LOOPS 200 #define HFENCE_TBL_MSG \ - "[%d]hfence[%d] v:%d err:%lu ctx:%llu seq:%llu wait:0x%llx alloc:%d f:0x%llx child_cnt:%d" \ + "[%d]hfence[%u] v:%d err:%u ctx:%llu seq:%llu wait:0x%llx alloc:%d f:0x%llx child_cnt:%d"\ "%s ct:%llu tt:%llu wt:%llu\n" /* each hwfence parent includes one "32-bit" element + "," separator */ @@ -25,7 +25,7 @@ /* event dump data includes one "32-bit" element + "|" separator */ #define HW_FENCE_MAX_DATA_PER_EVENT_DUMP (HW_FENCE_EVENT_MAX_DATA * 9) -#define HFENCE_EVT_MSG "[%d][cpu:%d][%lu] data[%d]:%s\n" +#define HFENCE_EVT_MSG "[%d][cpu:%d][%llu] data[%d]:%s\n" u32 msm_hw_fence_debug_level = HW_FENCE_PRINTK; @@ -59,7 +59,8 @@ static int _get_debugfs_input_client(struct file *file, int client_id; if (!file || !file->private_data) { - HWFNC_ERR("unexpected data %d\n", !file); + HWFNC_ERR("unexpected data file:0x%pK private_data:0x%pK\n", file, + file ? file->private_data : NULL); return -EINVAL; } *drv_data = file->private_data; @@ -285,7 +286,8 @@ static ssize_t hw_fence_dbg_tx_and_signal_clients_wr(struct file *file, int signal_id, ret; if (!file || !file->private_data) { - HWFNC_ERR("unexpected data %d\n", file); + HWFNC_ERR("unexpected data file:0x%pK private_data:0x%pK\n", file, + file ? 
file->private_data : NULL); return -EINVAL; } drv_data = file->private_data; @@ -455,7 +457,7 @@ static ssize_t hw_fence_dbg_create_wr(struct file *file, return -ENOMEM; } - snprintf(dma_fence->name, HW_FENCE_NAME_SIZE, "hwfence:id:%d:ctx=%lu:seqno:%lu", + snprintf(dma_fence->name, HW_FENCE_NAME_SIZE, "hwfence:id:%d:ctx=%llu:seqno:%llu", client_id, client_info->dma_context, hw_fence_dbg_seqno); spin_lock_init(fence_lock); @@ -482,7 +484,7 @@ static ssize_t hw_fence_dbg_create_wr(struct file *file, } static void _dump_fence_helper(enum hw_fence_drv_prio prio, struct msm_hw_fence *hw_fence, - char *parents_dump, u64 hash, u32 count) + char *parents_dump, u32 index, u32 count) { char sublist[HW_FENCE_MAX_PARENTS_SUBLIST_DUMP]; u32 parents_cnt; @@ -497,8 +499,8 @@ static void _dump_fence_helper(enum hw_fence_drv_prio prio, struct msm_hw_fence memset(parents_dump, 0, sizeof(char) * HW_FENCE_MAX_PARENTS_DUMP); if (hw_fence->parents_cnt) { if (hw_fence->parents_cnt > MSM_HW_FENCE_MAX_JOIN_PARENTS) { - HWFNC_ERR("hfence[%d] has invalid parents_cnt:%d greater than max:%d\n", - hash, hw_fence->parents_cnt, MSM_HW_FENCE_MAX_JOIN_PARENTS); + HWFNC_ERR("hfence[%u] has invalid parents_cnt:%d greater than max:%d\n", + index, hw_fence->parents_cnt, MSM_HW_FENCE_MAX_JOIN_PARENTS); parents_cnt = MSM_HW_FENCE_MAX_JOIN_PARENTS; } else { parents_cnt = hw_fence->parents_cnt; @@ -507,12 +509,12 @@ static void _dump_fence_helper(enum hw_fence_drv_prio prio, struct msm_hw_fence memset(sublist, 0, sizeof(sublist)); for (i = 0; i < parents_cnt; i++) len += scnprintf(sublist + len, HW_FENCE_MAX_PARENTS_SUBLIST_DUMP - len, - "%lu,", hw_fence->parent_list[i]); + "%llu,", hw_fence->parent_list[i]); scnprintf(parents_dump, HW_FENCE_MAX_PARENTS_DUMP, " p:[%s]", sublist); } HWFNC_DBG_DUMP(prio, HFENCE_TBL_MSG, - count, hash, hw_fence->valid, hw_fence->error, hw_fence->ctx_id, hw_fence->seq_id, + count, index, hw_fence->valid, hw_fence->error, hw_fence->ctx_id, hw_fence->seq_id, 
hw_fence->wait_client_mask, hw_fence->fence_allocator, hw_fence->flags, hw_fence->pending_child_cnt, parents_dump, hw_fence->fence_create_time, hw_fence->fence_trigger_time, hw_fence->fence_wait_time); @@ -569,10 +571,10 @@ static int dump_single_entry(struct hw_fence_driver_data *drv_data, char *buf, u hw_fence = msm_hw_fence_find(drv_data, NULL, context, seqno, &hash); if (!hw_fence) { - HWFNC_ERR("no valid hfence found for context:%lu seqno:%lu hash:%lu", + HWFNC_ERR("no valid hfence found for context:%llu seqno:%llu hash:%llu", context, seqno, hash); len = scnprintf(buf + len, max_size - len, - "no valid hfence found for context:%lu seqno:%lu hash:%lu\n", + "no valid hfence found for context:%llu seqno:%llu hash:%llu\n", context, seqno, hash); goto exit; @@ -641,7 +643,7 @@ static void _dump_event(enum hw_fence_drv_prio prio, struct msm_hw_fence_event * memset(data, 0, sizeof(char) * HW_FENCE_MAX_DATA_PER_EVENT_DUMP); if (event->data_cnt > HW_FENCE_EVENT_MAX_DATA) { - HWFNC_ERR("event[%d] has invalid data_cnt:%lu greater than max_data_cnt:%lu\n", + HWFNC_ERR("event[%d] has invalid data_cnt:%u greater than max_data_cnt:%u\n", index, event->data_cnt, HW_FENCE_EVENT_MAX_DATA); data_cnt = HW_FENCE_EVENT_MAX_DATA; } else { @@ -650,7 +652,7 @@ static void _dump_event(enum hw_fence_drv_prio prio, struct msm_hw_fence_event * for (i = 0; i < data_cnt; i++) len += scnprintf(data + len, HW_FENCE_MAX_DATA_PER_EVENT_DUMP - len, - "%lx|", event->data[i]); + "%x|", event->data[i]); HWFNC_DBG_DUMP(prio, HFENCE_EVT_MSG, index, event->cpu, event->time, event->data_cnt, data); } @@ -693,7 +695,8 @@ static ssize_t hw_fence_dbg_dump_events_rd(struct file *file, char __user *user_ static bool wraparound; if (!file || !file->private_data) { - HWFNC_ERR("unexpected data %d\n", file); + HWFNC_ERR("unexpected data file:0x%pK private_data:0x%pK\n", file, + file ? 
file->private_data : NULL); return -EINVAL; } drv_data = file->private_data; @@ -713,7 +716,7 @@ static ssize_t hw_fence_dbg_dump_events_rd(struct file *file, char __user *user_ } if (user_buf_size < entry_size) { - HWFNC_ERR("Not enough buff size:%d to dump entries:%d\n", user_buf_size, + HWFNC_ERR("Not enough buff size:%zu to dump entries:%d\n", user_buf_size, entry_size); return -EINVAL; } @@ -750,7 +753,7 @@ static ssize_t hw_fence_dbg_dump_events_rd(struct file *file, char __user *user_ HWFNC_DBG_H("-- dump_events: index:%d qtime:%llu\n", index, hw_fence_get_qtime(drv_data)); if (len <= 0 || len > user_buf_size) { - HWFNC_ERR("len:%d invalid buff size:%d\n", len, user_buf_size); + HWFNC_ERR("len:%d invalid buff size:%zu\n", len, user_buf_size); len = 0; goto exit; } @@ -789,7 +792,7 @@ static void _dump_queue(enum hw_fence_drv_prio prio, struct msm_hw_fence_client hfi_header = (struct msm_hw_fence_hfi_queue_header *)queue->va_header; mb(); /* make sure data is ready before read */ - HWFNC_DBG_DUMP(prio, "%s va:0x%pK rd_idx:%lu wr_idx:%lu tx_wm:%lu q_size_bytes:%lu\n", + HWFNC_DBG_DUMP(prio, "%s va:0x%pK rd_idx:%u wr_idx:%u tx_wm:%u q_size_bytes:%u\n", (queue_type == HW_FENCE_TX_QUEUE) ? "TX QUEUE" : "RX QUEUE", queue->va_queue, hfi_header->read_index, hfi_header->write_index, hfi_header->tx_wm, queue->q_size_bytes); @@ -802,7 +805,7 @@ static void _dump_queue(enum hw_fence_drv_prio prio, struct msm_hw_fence_client timestamp = (u64)payload->timestamp_lo | ((u64)payload->timestamp_hi << 32); HWFNC_DBG_DUMP(prio, - "%s[%d]: hash:%d ctx:%llu seqno:%llu f:%llu d:%llu err:%u time:%llu\n", + "%s[%d]: hash:%llu ctx:%llu seqno:%llu f:%llu d:%llu err:%u time:%llu\n", (queue_type == HW_FENCE_TX_QUEUE) ? 
"tx" : "rx", i, payload->hash, payload->ctxt_id, payload->seqno, payload->flags, payload->client_data, payload->error, timestamp); @@ -840,7 +843,8 @@ static ssize_t hw_fence_dbg_dump_queues_wr(struct file *file, const char __user int client_id; if (!file || !file->private_data) { - HWFNC_ERR("unexpected data %d\n", file); + HWFNC_ERR("unexpected data file:0x%pK private_data:0x%pK\n", file, + file ? file->private_data : NULL); return -EINVAL; } drv_data = file->private_data; @@ -880,7 +884,8 @@ static ssize_t hw_fence_dbg_dump_table_rd(struct file *file, char __user *user_b static u32 index, cnt; if (!file || !file->private_data) { - HWFNC_ERR("unexpected data %d\n", file); + HWFNC_ERR("unexpected data file:0x%pK private_data:0x%pK\n", file, + file ? file->private_data : NULL); return -EINVAL; } drv_data = file->private_data; @@ -897,7 +902,7 @@ static ssize_t hw_fence_dbg_dump_table_rd(struct file *file, char __user *user_b } if (user_buf_size < entry_size) { - HWFNC_ERR("Not enough buff size:%d to dump entries:%d\n", user_buf_size, + HWFNC_ERR("Not enough buff size:%lu to dump entries:%d\n", user_buf_size, entry_size); return -EINVAL; } @@ -911,7 +916,7 @@ static ssize_t hw_fence_dbg_dump_table_rd(struct file *file, char __user *user_b dump_full_table(drv_data, buf, &index, &cnt, max_size, entry_size); if (len <= 0 || len > user_buf_size) { - HWFNC_ERR("len:%d invalid buff size:%d\n", len, user_buf_size); + HWFNC_ERR("len:%d invalid buff size:%lu\n", len, user_buf_size); len = 0; goto exit; } @@ -950,13 +955,14 @@ static ssize_t hw_fence_dbg_dump_table_wr(struct file *file, int num_input_params; if (!file || !file->private_data) { - HWFNC_ERR("unexpected data %d\n", file); + HWFNC_ERR("unexpected data file:0x%pK private_data:0x%pK\n", file, + file ? 
file->private_data : NULL); return -EINVAL; } drv_data = file->private_data; if (user_buf_size >= sizeof(buf)) { - HWFNC_ERR("wrong size:%d size:%d\n", user_buf_size, sizeof(buf)); + HWFNC_ERR("wrong size:%lu size:%lu\n", user_buf_size, sizeof(buf)); return -EFAULT; } @@ -966,7 +972,7 @@ static ssize_t hw_fence_dbg_dump_table_wr(struct file *file, buf[user_buf_size] = 0; /* end of string */ /* read the input params */ - num_input_params = sscanf(buf, "%lu %lu", ¶m_0, ¶m_1); + num_input_params = sscanf(buf, "%llu %llu", ¶m_0, ¶m_1); if (num_input_params == 2) { /* if debugfs receives two input params */ drv_data->debugfs_data.context_rd = param_0; @@ -1019,7 +1025,8 @@ static ssize_t hw_fence_dbg_create_join_fence(struct file *file, spinlock_t **fences_lock = NULL; if (!file || !file->private_data) { - HWFNC_ERR("unexpected data %d\n", file); + HWFNC_ERR("unexpected data file:0x%pK private_data:0x%pK\n", file, + file ? file->private_data : NULL); return -EINVAL; } drv_data = file->private_data; diff --git a/hw_fence/src/hw_fence_drv_ipc.c b/hw_fence/src/hw_fence_drv_ipc.c index a9b317e87c..b93956c5fd 100644 --- a/hw_fence/src/hw_fence_drv_ipc.c +++ b/hw_fence/src/hw_fence_drv_ipc.c @@ -280,7 +280,7 @@ static int _hw_fence_ipcc_init_map_with_configurable_clients(struct hw_fence_dri for (j = 0; j < clients_num; j++) { /* this should never happen if drv_data->clients_num is correct */ if (map_idx >= drv_data->clients_num) { - HWFNC_ERR("%s clients_num:%lu exceeds drv_data->clients_num:%lu\n", + HWFNC_ERR("%s clients_num:%d exceeds drv_data->clients_num:%u\n", drv_data->hw_fence_client_types[client_type].name, clients_num, drv_data->clients_num); return -EINVAL; @@ -381,7 +381,7 @@ int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data) return -1; } - HWFNC_DBG_H("ipcc_io_mem:0x%lx\n", (u64)drv_data->ipcc_io_mem); + HWFNC_DBG_H("ipcc_io_mem:0x%llx\n", (u64)drv_data->ipcc_io_mem); HWFNC_DBG_H("Initialize dpu signals\n"); /* Enable Client-Signal 
pairs from DPU (25) to APPS(NS) (8) */ @@ -402,7 +402,7 @@ int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data) val = 0x00000001; ptr = IPC_PROTOCOLp_CLIENTc_CONFIG(drv_data->ipcc_io_mem, drv_data->protocol_id, hw_fence_client->ipc_client_id_phys); - HWFNC_DBG_H("Write:0x%x to RegOffset:0x%lx\n", val, (u64)ptr); + HWFNC_DBG_H("Write:0x%x to RegOffset:0x%llx\n", val, (u64)ptr); writel_relaxed(val, ptr); protocol_enabled = true; @@ -418,7 +418,7 @@ int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data) (hw_fence_client->ipc_signal_id & 0xFFFF); ptr = IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_ENABLE(drv_data->ipcc_io_mem, drv_data->protocol_id, hw_fence_client->ipc_client_id_phys); - HWFNC_DBG_H("Write:0x%x to RegOffset:0x%lx\n", val, (u64)ptr); + HWFNC_DBG_H("Write:0x%x to RegOffset:0x%llx\n", val, (u64)ptr); writel_relaxed(val, ptr); } diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 52d1159b2f..61e3aedef8 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -62,7 +62,7 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: if (client_id >= drv_data->clients_num || !drv_data->hw_fence_client_queue_size[client_id].type) { - HWFNC_ERR("Invalid client_id:%d for clients_num:%lu\n", client_id, + HWFNC_ERR("Invalid client_id:%d for clients_num:%u\n", client_id, drv_data->clients_num); return -EINVAL; } @@ -89,7 +89,7 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, HWFNC_ERR("Failed to reserve id:%d client %d\n", mem_reserve_id, client_id); return -ENOMEM; } - HWFNC_DBG_INIT("phys:0x%x ptr:0x%pK size:%d\n", phys, ptr, size); + HWFNC_DBG_INIT("phys:0x%llx ptr:0x%pK size:%d\n", phys, ptr, size); /* Populate Memory descriptor with address */ mem_descriptor->virtual_addr = ptr; @@ -145,7 +145,7 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, 
hfi_queue_header->tx_wm = txq_idx_start; hfi_queue_header->read_index = txq_idx_start; hfi_queue_header->write_index = txq_idx_start; - HWFNC_DBG_INIT("init:TX_QUEUE client:%d rd_idx=%s=%lu\n", client_id, + HWFNC_DBG_INIT("init:TX_QUEUE client:%d rd_idx=%s=%u\n", client_id, skip_txq_wr_idx ? "wr_idx=tx_wm" : "wr_idx", txq_idx_start); } @@ -158,7 +158,7 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, queues[i].pa_queue = qphys; queues[i].va_header = hfi_queue_header; queues[i].q_size_bytes = queue_size; - HWFNC_DBG_INIT("init:%s client:%d q[%d] va=0x%pK pa=0x%x hd:0x%pK sz:%u pkt:%d\n", + HWFNC_DBG_INIT("init:%s client:%d q[%d] va=0x%pK pa=0x%llx hd:0x%pK sz:%u pkt:%d\n", hfi_queue_header->type == HW_FENCE_TX_QUEUE ? "TX_QUEUE" : "RX_QUEUE", client_id, i, queues[i].va_queue, queues[i].pa_queue, queues[i].va_header, queues[i].q_size_bytes, payload_size); @@ -171,7 +171,7 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, } else { queues[i].rd_wr_idx_factor = 1; } - HWFNC_DBG_INIT("rd_wr_idx_start:%lu rd_wr_idx_factor:%lu skip_wr_idx:%s\n", + HWFNC_DBG_INIT("rd_wr_idx_start:%u rd_wr_idx_factor:%u skip_wr_idx:%s\n", queues[i].rd_wr_idx_start, queues[i].rd_wr_idx_factor, queues[i].skip_wr_idx ? 
"true" : "false"); @@ -199,7 +199,7 @@ static void _translate_queue_indexes_custom_to_default(struct msm_hw_fence_queue if (REQUIRES_IDX_TRANSLATION(queue)) { *read_idx = IDX_TRANSLATE_CUSTOM_TO_DEFAULT(queue, *read_idx); *write_idx = IDX_TRANSLATE_CUSTOM_TO_DEFAULT(queue, *write_idx); - HWFNC_DBG_Q("rd_idx_u32:%lu wr_idx_u32:%lu rd_wr_idx start:%lu factor:%lu\n", + HWFNC_DBG_Q("rd_idx_u32:%u wr_idx_u32:%u rd_wr_idx start:%u factor:%u\n", *read_idx, *write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor); } } @@ -210,13 +210,13 @@ int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence_queue *queue; if (queue_type >= HW_FENCE_CLIENT_QUEUES || !hw_fence_client || !payload) { - HWFNC_ERR("Invalid queue type:%s hw_fence_client:0x%pK payload:0x%pK\n", queue_type, + HWFNC_ERR("Invalid queue type:%d hw_fence_client:0x%pK payload:0x%pK\n", queue_type, hw_fence_client, payload); return -EINVAL; } queue = &hw_fence_client->queues[queue_type]; - HWFNC_DBG_Q("read client:%lu queue:0x%pK\n", hw_fence_client->client_id, queue); + HWFNC_DBG_Q("read client:%d queue:0x%pK\n", hw_fence_client->client_id, queue); return hw_fence_read_queue_helper(queue, payload); } @@ -234,7 +234,7 @@ int hw_fence_read_queue_helper(struct msm_hw_fence_queue *queue, q_size_u32 = (queue->q_size_bytes / sizeof(u32)); payload_size_u32 = (sizeof(struct msm_hw_fence_queue_payload) / sizeof(u32)); - HWFNC_DBG_Q("sizeof payload:%d\n", sizeof(struct msm_hw_fence_queue_payload)); + HWFNC_DBG_Q("sizeof payload:%lu\n", sizeof(struct msm_hw_fence_queue_payload)); if (!hfi_header || !payload) { HWFNC_ERR("Invalid queue\n"); @@ -262,7 +262,7 @@ int hw_fence_read_queue_helper(struct msm_hw_fence_queue *queue, /* Move the pointer where we need to read and cast it */ read_ptr = ((u32 *)queue->va_queue + read_idx); read_ptr_payload = (struct msm_hw_fence_queue_payload *)read_ptr; - HWFNC_DBG_Q("read_ptr:0x%pK queue: va=0x%pK pa=0x%pK read_ptr_payload:0x%pK\n", read_ptr, + 
HWFNC_DBG_Q("read_ptr:0x%pK queue: va=0x%pK pa=0x%llx read_ptr_payload:0x%pK\n", read_ptr, queue->va_queue, queue->pa_queue, read_ptr_payload); /* Calculate the index after the read */ @@ -279,7 +279,7 @@ int hw_fence_read_queue_helper(struct msm_hw_fence_queue *queue, /* translate to_read_idx to custom indexing with offset */ if (REQUIRES_IDX_TRANSLATION(queue)) { to_read_idx = IDX_TRANSLATE_DEFAULT_TO_CUSTOM(queue, to_read_idx); - HWFNC_DBG_Q("translated to_read_idx:%lu rd_wr_idx start:%lu factor:%lu\n", + HWFNC_DBG_Q("translated to_read_idx:%u rd_wr_idx start:%u factor:%u\n", to_read_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor); } @@ -349,7 +349,7 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, int ret = 0; if (queue_type >= hw_fence_client->queues_num) { - HWFNC_ERR("Invalid queue type:%d client_id:%d q_num:%lu\n", queue_type, + HWFNC_ERR("Invalid queue type:%d client_id:%d q_num:%d\n", queue_type, hw_fence_client->client_id, hw_fence_client->queues_num); return -EINVAL; } @@ -409,13 +409,13 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, /* Move the pointer where we need to write and cast it */ q_payload_write_ptr = ((u32 *)queue->va_queue + write_idx); write_ptr_payload = (struct msm_hw_fence_queue_payload *)q_payload_write_ptr; - HWFNC_DBG_Q("q_payload_write_ptr:0x%pK queue: va=0x%pK pa=0x%pK write_ptr_payload:0x%pK\n", + HWFNC_DBG_Q("q_payload_write_ptr:0x%pK queue: va=0x%pK pa=0x%llx write_ptr_payload:0x%pK\n", q_payload_write_ptr, queue->va_queue, queue->pa_queue, write_ptr_payload); /* calculate the index after the write */ to_write_idx = write_idx + payload_size_u32; - HWFNC_DBG_Q("to_write_idx:%d write_idx:%d payload_size:%u\n", to_write_idx, write_idx, + HWFNC_DBG_Q("to_write_idx:%u write_idx:%u payload_size:%u\n", to_write_idx, write_idx, payload_size_u32); HWFNC_DBG_L("client_id:%d update %s hash:%llu ctx_id:%llu seqno:%llu flags:%llu error:%u\n", hw_fence_client->client_id, 
_get_queue_type(queue_type), @@ -432,7 +432,7 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, /* translate to_write_idx to custom indexing with offset */ if (REQUIRES_IDX_TRANSLATION(queue)) { to_write_idx = IDX_TRANSLATE_DEFAULT_TO_CUSTOM(queue, to_write_idx); - HWFNC_DBG_Q("translated to_write_idx:%lu rd_wr_idx start:%lu factor:%lu\n", + HWFNC_DBG_Q("translated to_write_idx:%d rd_wr_idx start:%d factor:%d\n", to_write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor); } @@ -498,7 +498,7 @@ int hw_fence_update_existing_txq_payload(struct hw_fence_driver_data *drv_data, } first_payload = (struct msm_hw_fence_queue_payload *)((u32 *)queue->va_queue + read_idx); - HWFNC_DBG_Q("client:%d txq: va=0x%pK pa=0x%pK idx:%d ptr_payload:0x%pK\n", + HWFNC_DBG_Q("client:%d txq: va=0x%pK pa=0x%llx idx:%d ptr_payload:0x%pK\n", hw_fence_client->client_id, queue->va_queue, queue->pa_queue, read_idx, first_payload); @@ -520,7 +520,7 @@ int hw_fence_update_existing_txq_payload(struct hw_fence_driver_data *drv_data, second_payload = (struct msm_hw_fence_queue_payload *) ((u32 *)queue->va_queue + second_idx); - HWFNC_DBG_Q("client:%d txq: va=0x%pK pa=0x%pK idx:%d ptr_payload:0x%pK\n", + HWFNC_DBG_Q("client:%d txq: va=0x%pK pa=0x%llx idx:%d ptr_payload:0x%pK\n", hw_fence_client->client_id, queue->va_queue, queue->pa_queue, second_idx, second_payload); @@ -564,7 +564,7 @@ static int init_global_locks(struct hw_fence_driver_data *drv_data) HWFNC_ERR("Failed to reserve clients locks mem %d\n", ret); return -ENOMEM; } - HWFNC_DBG_INIT("phys:0x%x ptr:0x%pK size:%d\n", phys, ptr, size); + HWFNC_DBG_INIT("phys:0x%llx ptr:0x%pK size:%d\n", phys, ptr, size); /* Populate Memory descriptor with address */ mem_descriptor = &drv_data->clients_locks_mem_desc; @@ -594,7 +594,7 @@ static int init_hw_fences_table(struct hw_fence_driver_data *drv_data) HWFNC_ERR("Failed to reserve table mem %d\n", ret); return -ENOMEM; } - HWFNC_DBG_INIT("phys:0x%x ptr:0x%pK size:%d\n", phys, ptr, 
size); + HWFNC_DBG_INIT("phys:0x%llx ptr:0x%pK size:%d\n", phys, ptr, size); /* Populate Memory descriptor with address */ mem_descriptor = &drv_data->hw_fences_mem_desc; @@ -629,8 +629,8 @@ static int init_hw_fences_events(struct hw_fence_driver_data *drv_data) } drv_data->events = (struct msm_hw_fence_event *)ptr; drv_data->total_events = size / sizeof(struct msm_hw_fence_event); - HWFNC_DBG_INIT("events:0x%pK total_events:%u event_sz:%u total_size:%u\n", drv_data->events, - drv_data->total_events, sizeof(struct msm_hw_fence_event), size); + HWFNC_DBG_INIT("events:0x%pK total_events:%u event_sz:%lu total_size:%u\n", + drv_data->events, drv_data->total_events, sizeof(struct msm_hw_fence_event), size); return 0; } @@ -914,7 +914,7 @@ static inline struct msm_hw_fence *_get_hw_fence(u32 table_total_entries, u64 hash) { if (hash >= table_total_entries) { - HWFNC_ERR("hash:%llu out of max range:%llu\n", + HWFNC_ERR("hash:%llu out of max range:%u\n", hash, table_total_entries); return NULL; } @@ -978,7 +978,7 @@ static void _reserve_hw_fence(struct hw_fence_driver_data *drv_data, hw_fence->fence_create_time = hw_fence_get_qtime(drv_data); hw_fence->debug_refcount++; - HWFNC_DBG_LUT("Reserved fence client:%d ctx:%llu seq:%llu hash:%llu\n", + HWFNC_DBG_LUT("Reserved fence client:%d ctx:%llu seq:%llu hash:%u\n", client_id, context, seqno, hash); } @@ -992,7 +992,7 @@ static void _unreserve_hw_fence(struct hw_fence_driver_data *drv_data, /* unreserve this HW fence */ hw_fence->valid = 0; - HWFNC_DBG_LUT("Unreserved fence client:%d ctx:%llu seq:%llu hash:%llu\n", + HWFNC_DBG_LUT("Unreserved fence client:%d ctx:%llu seq:%llu hash:%u\n", client_id, context, seqno, hash); } @@ -1014,7 +1014,7 @@ static void _reserve_join_fence(struct hw_fence_driver_data *drv_data, hw_fence->pending_child_cnt = pending_child_cnt; - HWFNC_DBG_LUT("Reserved join fence client:%d ctx:%llu seq:%llu hash:%llu\n", + HWFNC_DBG_LUT("Reserved join fence client:%d ctx:%llu seq:%llu hash:%u\n", client_id, 
context, seqno, hash); } @@ -1028,7 +1028,7 @@ static void _fence_found(struct hw_fence_driver_data *drv_data, * Currently just keeping this function for debugging purposes, can be removed * in final versions */ - HWFNC_DBG_LUT("Found fence client:%d ctx:%llu seq:%llu hash:%llu\n", + HWFNC_DBG_LUT("Found fence client:%d ctx:%llu seq:%llu hash:%u\n", client_id, context, seqno, hash); } @@ -1129,7 +1129,7 @@ struct msm_hw_fence *_hw_fence_lookup_and_process(struct hw_fence_driver_data *d wmb(); } - HWFNC_DBG_L("client_id:%lu op:%s ctx:%llu seqno:%llu hash:%llu step:%llu\n", + HWFNC_DBG_L("client_id:%u op:%s ctx:%llu seqno:%llu hash:%llu step:%llu\n", client_id, _get_op_mode(op_code), context, seqno, *hash, step); hw_fence_found = true; @@ -1178,7 +1178,7 @@ int hw_fence_create(struct hw_fence_driver_data *drv_data, /* allocate hw fence in table */ if (!_hw_fence_lookup_and_process(drv_data, hw_fences_tbl, context, seqno, client_id, 0, HW_FENCE_LOOKUP_OP_CREATE, hash)) { - HWFNC_ERR("Fail to create fence client:%lu ctx:%llu seqno:%llu\n", + HWFNC_ERR("Fail to create fence client:%u ctx:%llu seqno:%llu\n", client_id, context, seqno); ret = -EINVAL; } @@ -1207,7 +1207,7 @@ int hw_fence_destroy(struct hw_fence_driver_data *drv_data, /* remove hw fence from table*/ if (_hw_fence_cleanup(drv_data, hw_fences_tbl, client_id, context, seqno)) { - HWFNC_ERR("Fail destroying fence client:%lu ctx:%llu seqno:%llu\n", + HWFNC_ERR("Fail destroying fence client:%u ctx:%llu seqno:%llu\n", client_id, context, seqno); ret = -EINVAL; } @@ -1225,12 +1225,12 @@ int hw_fence_destroy_with_hash(struct hw_fence_driver_data *drv_data, hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, hw_fences_tbl, hash); if (!hw_fence) { - HWFNC_ERR("bad hw fence hash:%llu client:%lu\n", hash, client_id); + HWFNC_ERR("bad hw fence hash:%llu client:%u\n", hash, client_id); return -EINVAL; } if (hw_fence->fence_allocator != client_id) { - HWFNC_ERR("client:%lu cannot destroy fence hash:%llu 
fence_allocator:%lu\n", + HWFNC_ERR("client:%u cannot destroy fence hash:%llu fence_allocator:%u\n", client_id, hash, hw_fence->fence_allocator); return -EINVAL; } @@ -1238,7 +1238,7 @@ int hw_fence_destroy_with_hash(struct hw_fence_driver_data *drv_data, /* remove hw fence from table*/ if (_hw_fence_cleanup(drv_data, hw_fences_tbl, client_id, hw_fence->ctx_id, hw_fence->seq_id)) { - HWFNC_ERR("Fail destroying fence client:%lu ctx:%llu seqno:%llu hash:%llu\n", + HWFNC_ERR("Fail destroying fence client:%u ctx:%llu seqno:%llu hash:%llu\n", client_id, hw_fence->ctx_id, hw_fence->seq_id, hash); ret = -EINVAL; } @@ -1277,12 +1277,12 @@ static struct msm_hw_fence *_hw_fence_process_join_fence(struct hw_fence_driver_ join_fence = _hw_fence_lookup_and_process(drv_data, hw_fences_tbl, context, seqno, client_id, pending_child_cnt, HW_FENCE_LOOKUP_OP_CREATE_JOIN, hash); if (!join_fence) - HWFNC_ERR("Fail to create join fence client:%lu ctx:%llu seqno:%llu\n", + HWFNC_ERR("Fail to create join fence client:%u ctx:%llu seqno:%llu\n", client_id, context, seqno); } else { /* destroy the fence */ if (_hw_fence_cleanup(drv_data, hw_fences_tbl, client_id, context, seqno)) - HWFNC_ERR("Fail destroying join fence client:%lu ctx:%llu seqno:%llu\n", + HWFNC_ERR("Fail destroying join fence client:%u ctx:%llu seqno:%llu\n", client_id, context, seqno); } @@ -1301,7 +1301,7 @@ struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data, hw_fence = _hw_fence_lookup_and_process(drv_data, hw_fences_tbl, context, seqno, client_id, 0, HW_FENCE_LOOKUP_OP_FIND_FENCE, hash); if (!hw_fence) - HWFNC_ERR("Fail to find hw fence client:%lu ctx:%llu seqno:%llu\n", + HWFNC_ERR("Fail to find hw fence client:%u ctx:%llu seqno:%llu\n", client_id, context, seqno); return hw_fence; @@ -1363,7 +1363,7 @@ static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data hw_fence_child = msm_hw_fence_find(drv_data, hw_fence_client, child_fence->context, child_fence->seqno, &hash); 
if (!hw_fence_child) { - HWFNC_ERR("Cannot cleanup child fence context:%lu seqno:%lu hash:%lu\n", + HWFNC_ERR("Cannot cleanup child fence context:%llu seqno:%llu hash:%llu\n", child_fence->context, child_fence->seqno, hash); /* @@ -1463,7 +1463,7 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, hw_fence_child = msm_hw_fence_find(drv_data, hw_fence_client, child_fence->context, child_fence->seqno, &hash); if (!hw_fence_child) { - HWFNC_ERR("Cannot find child fence context:%lu seqno:%lu hash:%lu\n", + HWFNC_ERR("Cannot find child fence context:%llu seqno:%llu hash:%llu\n", child_fence->context, child_fence->seqno, hash); ret = -EINVAL; goto error_array; @@ -1610,7 +1610,7 @@ int hw_fence_process_fence(struct hw_fence_driver_data *drv_data, } /* fence must be hw-fence */ if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) { - HWFNC_ERR("DMA Fence in is not a HW Fence flags:0x%llx\n", fence->flags); + HWFNC_ERR("DMA Fence in is not a HW Fence flags:0x%lx\n", fence->flags); return -EINVAL; } @@ -1666,7 +1666,7 @@ void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data, hfi_header->tx_wm = wr_idx; writel_relaxed(wr_idx, &hfi_header->read_index); wmb(); /* make sure data is updated after write the index*/ - HWFNC_DBG_Q("update tx queue %s to match write_index:%lu\n", + HWFNC_DBG_Q("update tx queue %s to match write_index:%u\n", queue->skip_wr_idx ? 
"read_index=tx_wm" : "read_index", wr_idx); /* For the client RxQ: set the write-index same as last read done by the client */ @@ -1693,7 +1693,7 @@ void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data, /* unlock */ GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 0); - HWFNC_DBG_Q("update rx queue write_index to match read_index:%lu\n", rd_idx); + HWFNC_DBG_Q("update rx queue write_index to match read_index:%u\n", rd_idx); } } @@ -1706,7 +1706,7 @@ int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */ if (hw_fence->wait_client_mask & BIT(hw_fence_client->client_id)) { - HWFNC_DBG_H("clearing client:%d wait bit for fence: ctx:%d seqno:%d\n", + HWFNC_DBG_H("clearing client:%d wait bit for fence: ctx:%llu seqno:%llu\n", hw_fence_client->client_id, hw_fence->ctx_id, hw_fence->seq_id); hw_fence->wait_client_mask &= ~BIT(hw_fence_client->client_id); @@ -1728,7 +1728,7 @@ int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, ret = hw_fence_destroy(drv_data, hw_fence_client, hw_fence->ctx_id, hw_fence->seq_id); if (ret) { - HWFNC_ERR("Error destroying HW fence: ctx:%d seqno:%d\n", + HWFNC_ERR("Error destroying HW fence: ctx:%llu seqno:%llu\n", hw_fence->ctx_id, hw_fence->seq_id); } } diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index f2b34c1c4f..285adeec80 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -8,8 +8,12 @@ #include #include #include -#include #include +#if (KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE) +#include +#else +#include +#endif #if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) #include #endif @@ -182,7 +186,7 @@ static void _unlock(struct hw_fence_driver_data *drv_data, uint64_t *lock) */ #if IS_ENABLED(CONFIG_DEBUG_FS) drv_data->debugfs_data.lock_wake_cnt++; - HWFNC_DBG_LOCK("triggering ipc to unblock SVM lock_val:%d cnt:%llu\n", 
lock_val, + HWFNC_DBG_LOCK("triggering ipc to unblock SVM lock_val:%llu cnt:%llu\n", lock_val, drv_data->debugfs_data.lock_wake_cnt); #endif hw_fence_ipcc_trigger_signal(drv_data, @@ -418,9 +422,9 @@ static int hw_fence_gunyah_share_mem(struct hw_fence_driver_data *drv_data, srcvmids = BIT(src_vmlist[0].vmid); dstvmids = BIT(dst_vmlist[0].vmid) | BIT(dst_vmlist[1].vmid); ret = qcom_scm_assign_mem(drv_data->res.start, resource_size(&drv_data->res), &srcvmids, - dst_vmlist, ARRAY_SIZE(dst_vmlist)); + dst_vmlist, ARRAY_SIZE(dst_vmlist)); if (ret) { - HWFNC_ERR("%s: qcom_scm_assign_mem failed addr=%x size=%u err=%d\n", + HWFNC_ERR("%s: qcom_scm_assign_mem failed addr=0x%llx size=%lu err=%d\n", __func__, drv_data->res.start, drv_data->size, ret); return ret; } @@ -451,11 +455,11 @@ static int hw_fence_gunyah_share_mem(struct hw_fence_driver_data *drv_data, acl, sgl, NULL, &drv_data->memparcel); #endif if (ret) { - HWFNC_ERR("%s: gh_rm_mem_share failed addr=%x size=%u err=%d\n", + HWFNC_ERR("%s: gh_rm_mem_share failed addr=%llx size=%lu err=%d\n", __func__, drv_data->res.start, drv_data->size, ret); /* Attempt to give resource back to HLOS */ - qcom_scm_assign_mem(drv_data->res.start, resource_size(&drv_data->res), - &dstvmids, src_vmlist, ARRAY_SIZE(src_vmlist)); + qcom_scm_assign_mem(drv_data->res.start, resource_size(&drv_data->res), &dstvmids, + src_vmlist, ARRAY_SIZE(src_vmlist)); ret = -EPROBE_DEFER; } @@ -525,10 +529,10 @@ static int hw_fence_rm_cb(struct notifier_block *nb, unsigned long cmd, void *da if (drv_data->res.start == res.start && resource_size(&drv_data->res) == resource_size(&res)) { drv_data->vm_ready = true; - HWFNC_DBG_INIT("mem_ready: add:0x%x size:%d ret:%d\n", res.start, - resource_size(&res), ret); + HWFNC_DBG_INIT("mem_ready: add:0x%llx size:%llu ret:%d\n", + res.start, resource_size(&res), ret); } else { - HWFNC_ERR("mem-shared mismatch:[0x%x,%d] expected:[0x%x,%d]\n", + HWFNC_ERR("mem-shared:[0x%llx,%llu] expected:[0x%llx,%llu]\n", 
res.start, resource_size(&res), drv_data->res.start, resource_size(&drv_data->res)); } @@ -586,12 +590,12 @@ int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *drv_data) } drv_data->size = resource_size(&drv_data->res); if (drv_data->size < drv_data->used_mem_size) { - HWFNC_ERR("0x%x size of carved-out memory region is less than required size:0x%x\n", + HWFNC_ERR("0x%lx size of carved-out memory region less than required size:0x%x\n", drv_data->size, drv_data->used_mem_size); return -ENOMEM; } - HWFNC_DBG_INIT("io_mem_base:0x%x start:0x%x end:0x%x size:0x%x name:%s\n", + HWFNC_DBG_INIT("io_mem_base:0x%pK start:0x%llx end:0x%llx size:0x%lx name:%s\n", drv_data->io_mem_base, drv_data->res.start, drv_data->res.end, drv_data->size, drv_data->res.name); @@ -661,7 +665,7 @@ int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data, case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: if (client_id >= drv_data->clients_num || !drv_data->hw_fence_client_queue_size[client_id].type) { - HWFNC_ERR("unexpected client_id:%d for clients_num:%lu\n", client_id, + HWFNC_ERR("unexpected client_id:%d for clients_num:%d\n", client_id, drv_data->clients_num); ret = -EINVAL; goto exit; @@ -675,7 +679,7 @@ int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data, remaining_size_bytes = drv_data->size - start_offset; if (start_offset >= drv_data->size || remaining_size_bytes < sizeof(struct msm_hw_fence_event)) { - HWFNC_DBG_INFO("no space for events total_sz:%lu offset:%lu evt_sz:%lu\n", + HWFNC_DBG_INFO("no space for events total_sz:%lu offset:%u evt_sz:%lu\n", drv_data->size, start_offset, sizeof(struct msm_hw_fence_event)); ret = -ENOMEM; goto exit; @@ -693,19 +697,19 @@ int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data, } if (start_offset + *size > drv_data->size) { - HWFNC_ERR("reservation request:%lu exceeds total size:%d\n", - start_offset + *size, drv_data->size); + HWFNC_ERR("reservation request exceeds total size:%lu\n", + drv_data->size); 
return -ENOMEM; } - HWFNC_DBG_INIT("type:%s (%d) io_mem_base:0x%x start:0x%x start_offset:%lu size:0x%x\n", - _get_mem_reserve_type(type), type, drv_data->io_mem_base, drv_data->res.start, + HWFNC_DBG_INIT("type:%s (%d) start:0x%llx start_offset:%u size:0x%x\n", + _get_mem_reserve_type(type), type, drv_data->res.start, start_offset, *size); *phys = drv_data->res.start + (phys_addr_t)start_offset; *pa = (drv_data->io_mem_base + start_offset); /* offset is in bytes */ - HWFNC_DBG_H("phys:0x%x pa:0x%pK\n", *phys, *pa); + HWFNC_DBG_H("phys:0x%llx pa:0x%pK\n", *phys, *pa); exit: return ret; @@ -748,7 +752,7 @@ static int _parse_client_queue_dt_props_extra(struct hw_fence_driver_data *drv_d desc->txq_idx_start = tmp[2]; if (count >= 4) { if (tmp[3] > 1) { - HWFNC_ERR("%s invalid txq_idx_by_payload prop:%lu\n", desc->name, tmp[3]); + HWFNC_ERR("%s invalid txq_idx_by_payload prop:%u\n", desc->name, tmp[3]); ret = -EINVAL; goto exit; } @@ -758,14 +762,14 @@ static int _parse_client_queue_dt_props_extra(struct hw_fence_driver_data *drv_d if (desc->start_padding % sizeof(u32) || desc->end_padding % sizeof(u32) || (desc->start_padding + desc->end_padding) % sizeof(u64)) { - HWFNC_ERR("%s start_padding:%lu end_padding:%lu violates mem alignment\n", + HWFNC_ERR("%s start_padding:%u end_padding:%u violates mem alignment\n", desc->name, desc->start_padding, desc->end_padding); ret = -EINVAL; goto exit; } if (desc->start_padding >= U32_MAX - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num)) { - HWFNC_ERR("%s client queues_num:%lu start_padding:%lu will overflow mem_size\n", + HWFNC_ERR("%s client queues_num:%u start_padding:%u will overflow mem_size\n", desc->name, desc->queues_num, desc->start_padding); ret = -EINVAL; goto exit; @@ -773,7 +777,7 @@ static int _parse_client_queue_dt_props_extra(struct hw_fence_driver_data *drv_d if (desc->end_padding >= U32_MAX - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) - desc->start_padding) { - HWFNC_ERR("%s client q_num:%lu 
start_p:%lu end_p:%lu will overflow mem_size\n", + HWFNC_ERR("%s client q_num:%u start_p:%u end_p:%u will overflow mem_size\n", desc->name, desc->queues_num, desc->start_padding, desc->end_padding); ret = -EINVAL; goto exit; @@ -782,14 +786,14 @@ static int _parse_client_queue_dt_props_extra(struct hw_fence_driver_data *drv_d max_idx_from_zero = idx_by_payload ? desc->queue_entries : desc->queue_entries * payload_size_u32; if (desc->txq_idx_start >= U32_MAX - max_idx_from_zero) { - HWFNC_ERR("%s txq_idx start:%lu by_payload:%s q_entries:%d will overflow txq_idx\n", + HWFNC_ERR("%s txq_idx start:%u by_payload:%s q_entries:%u will overflow txq_idx\n", desc->name, desc->txq_idx_start, idx_by_payload ? "true" : "false", desc->queue_entries); ret = -EINVAL; goto exit; } - HWFNC_DBG_INIT("%s: start_p=%lu end_p=%lu txq_idx_start:%lu txq_idx_by_payload:%s\n", + HWFNC_DBG_INIT("%s: start_p=%u end_p=%u txq_idx_start:%u txq_idx_by_payload:%s\n", desc->name, desc->start_padding, desc->end_padding, desc->txq_idx_start, idx_by_payload ? 
"true" : "false"); @@ -818,7 +822,7 @@ static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_da desc->queue_entries = tmp[2]; if (tmp[3] > 1) { - HWFNC_ERR("%s invalid skip_txq_wr_idx prop:%lu\n", desc->name, tmp[3]); + HWFNC_ERR("%s invalid skip_txq_wr_idx prop:%u\n", desc->name, tmp[3]); return -EINVAL; } desc->skip_txq_wr_idx = tmp[3]; @@ -826,7 +830,7 @@ static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_da if (desc->clients_num > desc->max_clients_num || !desc->queues_num || desc->queues_num > HW_FENCE_CLIENT_QUEUES || !desc->queue_entries) { - HWFNC_ERR("%s invalid dt: clients_num:%lu queues_num:%lu, queue_entries:%lu\n", + HWFNC_ERR("%s invalid dt: clients_num:%u queues_num:%u, queue_entries:%u\n", desc->name, desc->clients_num, desc->queues_num, desc->queue_entries); return -EINVAL; } @@ -840,7 +844,7 @@ static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_da /* compute mem_size */ if (desc->queue_entries >= U32_MAX / HW_FENCE_CLIENT_QUEUE_PAYLOAD) { - HWFNC_ERR("%s client queue entries:%lu will overflow client queue size\n", + HWFNC_ERR("%s client queue entries:%u will overflow client queue size\n", desc->name, desc->queue_entries); return -EINVAL; } @@ -849,7 +853,7 @@ static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_da if (queue_size >= ((U32_MAX & PAGE_MASK) - (HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) + desc->start_padding + desc->end_padding)) / desc->queues_num) { - HWFNC_ERR("%s client queue_sz:%lu start_p:%lu end_p:%lu will overflow mem size\n", + HWFNC_ERR("%s client queue_sz:%u start_p:%u end_p:%u will overflow mem size\n", desc->name, queue_size, desc->start_padding, desc->end_padding); return -EINVAL; } @@ -858,12 +862,12 @@ static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_da (queue_size * desc->queues_num) + desc->start_padding + desc->end_padding); if (desc->mem_size > MAX_CLIENT_QUEUE_MEM_SIZE) { - 
HWFNC_ERR("%s client queue mem_size:%lu greater than max mem size:%lu\n", + HWFNC_ERR("%s client queue mem_size:%u greater than max mem size:%d\n", desc->name, desc->mem_size, MAX_CLIENT_QUEUE_MEM_SIZE); return -EINVAL; } - HWFNC_DBG_INIT("%s: clients=%lu q_num=%lu q_entries=%lu mem_sz=%lu skips_wr_ptr:%s\n", + HWFNC_DBG_INIT("%s: clients=%u q_num=%u q_entries=%u mem_sz=%u skips_wr_ptr:%s\n", desc->name, desc->clients_num, desc->queues_num, desc->queue_entries, desc->mem_size, desc->skip_txq_wr_idx ? "true" : "false"); @@ -921,7 +925,7 @@ static int _parse_client_queue_dt_props(struct hw_fence_driver_data *drv_data) drv_data->hw_fence_client_queue_size[client_id] = (struct hw_fence_client_queue_desc){desc, start_offset}; - HWFNC_DBG_INIT("%s client_id_ext:%lu client_id:%lu start_offset:%lu\n", + HWFNC_DBG_INIT("%s client_id_ext:%u client_id:%u start_offset:%u\n", desc->name, client_id_ext, client_id, start_offset); start_offset += desc->mem_size; } @@ -945,7 +949,7 @@ int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data) drv_data->hw_fence_table_entries = val; if (drv_data->hw_fence_table_entries >= U32_MAX / sizeof(struct msm_hw_fence)) { - HWFNC_ERR("table entries:%lu will overflow table size\n", + HWFNC_ERR("table entries:%u will overflow table size\n", drv_data->hw_fence_table_entries); return -EINVAL; } @@ -962,7 +966,7 @@ int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data) /* ctrl queues init */ if (drv_data->hw_fence_queue_entries >= U32_MAX / HW_FENCE_CTRL_QUEUE_PAYLOAD) { - HWFNC_ERR("queue entries:%lu will overflow ctrl queue size\n", + HWFNC_ERR("queue entries:%u will overflow ctrl queue size\n", drv_data->hw_fence_queue_entries); return -EINVAL; } @@ -971,7 +975,7 @@ int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data) if (drv_data->hw_fence_ctrl_queue_size >= (U32_MAX - HW_FENCE_HFI_CTRL_HEADERS_SIZE) / HW_FENCE_CTRL_QUEUES) { - HWFNC_ERR("queue size:%lu will overflow ctrl queue mem size\n", + 
HWFNC_ERR("queue size:%u will overflow ctrl queue mem size\n", drv_data->hw_fence_ctrl_queue_size); return -EINVAL; } @@ -993,12 +997,12 @@ int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data) if (!drv_data->clients) return -ENOMEM; - HWFNC_DBG_INIT("table: entries=%lu mem_size=%lu queue: entries=%lu\b", + HWFNC_DBG_INIT("table: entries=%u mem_size=%u queue: entries=%u\b", drv_data->hw_fence_table_entries, drv_data->hw_fence_mem_fences_table_size, drv_data->hw_fence_queue_entries); - HWFNC_DBG_INIT("ctrl queue: size=%lu mem_size=%lu\b", + HWFNC_DBG_INIT("ctrl queue: size=%u mem_size=%u\b", drv_data->hw_fence_ctrl_queue_size, drv_data->hw_fence_mem_ctrl_queues_size); - HWFNC_DBG_INIT("clients_num: %lu, total_mem_size:%lu\n", drv_data->clients_num, + HWFNC_DBG_INIT("clients_num: %u, total_mem_size:%u\n", drv_data->clients_num, drv_data->used_mem_size); return 0; @@ -1028,7 +1032,7 @@ int hw_fence_utils_map_ipcc(struct hw_fence_driver_data *drv_data) } drv_data->ipcc_io_mem = ptr; - HWFNC_DBG_H("mapped address:0x%x size:0x%x io_mem:0x%pK\n", + HWFNC_DBG_H("mapped address:0x%llx size:0x%x io_mem:0x%pK\n", drv_data->ipcc_reg_base, drv_data->ipcc_size, drv_data->ipcc_io_mem); diff --git a/hw_fence/src/hw_fence_ioctl.c b/hw_fence/src/hw_fence_ioctl.c index d7eab54fee..ebfcd816e8 100644 --- a/hw_fence/src/hw_fence_ioctl.c +++ b/hw_fence/src/hw_fence_ioctl.c @@ -255,7 +255,7 @@ static long hw_sync_ioctl_create_fence(struct hw_sync_obj *obj, unsigned long ar return -ENOMEM; } - snprintf(fence->name, HW_FENCE_NAME_SIZE, "hwfence:id:%d:ctx=%lu:seqno:%lu", + snprintf(fence->name, HW_FENCE_NAME_SIZE, "hwfence:id:%d:ctx=%llu:seqno:%llu", obj->client_id, obj->context, data.seqno); spin_lock_init(fence_lock); @@ -533,7 +533,7 @@ static long hw_sync_ioctl_fence_wait(struct hw_sync_obj *obj, unsigned long arg) ktime_compare_safe(exp_ktime, cur_ktime) > 0); if (!ret) { - HWFNC_ERR("timed out waiting for the client signal %d\n", data.timeout_ms); + HWFNC_ERR("timed 
out waiting for the client signal %llu\n", data.timeout_ms); /* Decrement the refcount that hw_sync_get_fence increments */ dma_fence_put(fence); return -ETIMEDOUT; @@ -548,7 +548,7 @@ static long hw_sync_ioctl_fence_wait(struct hw_sync_obj *obj, unsigned long arg) HWFNC_ERR("unable to read client rxq client_id:%d\n", obj->client_id); break; } - HWFNC_DBG_L("rxq read: hash:%llu, flags:%llu, error:%lu\n", + HWFNC_DBG_L("rxq read: hash:%llu, flags:%llu, error:%d\n", payload.hash, payload.flags, payload.error); if (payload.ctxt_id == fence->context && payload.seqno == fence->seqno) { /* Decrement the refcount that hw_sync_get_fence increments */ @@ -561,7 +561,7 @@ static long hw_sync_ioctl_fence_wait(struct hw_sync_obj *obj, unsigned long arg) dma_fence_put(fence); HWFNC_ERR("fence received did not match the fence expected\n"); - HWFNC_ERR("fence received: context:%d seqno:%d fence expected: context:%d seqno:%d\n", + HWFNC_ERR("fence received: ctx:%llu seqno:%llu fence expected: ctx:%llu seqno:%llu\n", payload.ctxt_id, payload.seqno, fence->context, fence->seqno); return read; diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 5ec9286eb5..93958105a0 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -92,7 +92,7 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, hw_fence_client->queues_num = hw_fence_utils_get_queues_num(hw_fence_drv_data, client_id); if (!hw_fence_client->queues_num || (hw_fence_client->update_rxq && hw_fence_client->queues_num < HW_FENCE_CLIENT_QUEUES)) { - HWFNC_ERR("client:%d invalid q_num:%lu for updates_rxq:%s\n", client_id, + HWFNC_ERR("client:%d invalid q_num:%d for updates_rxq:%s\n", client_id, hw_fence_client->queues_num, hw_fence_client->update_rxq ? 
"true" : "false"); ret = -EINVAL; @@ -243,7 +243,7 @@ int msm_hw_fence_destroy(void *client_handle, /* This Fence not a HW-Fence */ if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) { - HWFNC_ERR("DMA Fence is not a HW Fence flags:0x%llx\n", fence->flags); + HWFNC_ERR("DMA Fence is not a HW Fence flags:0x%lx\n", fence->flags); return -EINVAL; } @@ -437,7 +437,7 @@ int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 erro return -EAGAIN; } else if (IS_ERR_OR_NULL(client_handle) || (handle >= hw_fence_drv_data->hw_fences_tbl_cnt)) { - HWFNC_ERR("Invalid handle:%d or client handle:%d max:%d\n", handle, + HWFNC_ERR("Invalid handle:%llu or client handle:%d max:%d\n", handle, IS_ERR_OR_NULL(client_handle), hw_fence_drv_data->hw_fences_tbl_cnt); return -EINVAL; } @@ -464,11 +464,11 @@ int msm_hw_fence_update_txq_error(void *client_handle, u64 handle, u32 error, u3 return -EAGAIN; } else if (IS_ERR_OR_NULL(client_handle) || (handle >= hw_fence_drv_data->hw_fences_tbl_cnt) || !error) { - HWFNC_ERR("Invalid client_handle:0x%pK or fence handle:%d max:%d or error:%d\n", + HWFNC_ERR("Invalid client_handle:0x%pK or fence handle:%llu max:%d or error:%d\n", client_handle, handle, hw_fence_drv_data->hw_fences_tbl_cnt, error); return -EINVAL; } else if (update_flags != MSM_HW_FENCE_UPDATE_ERROR_WITH_MOVE) { - HWFNC_ERR("invalid flags:0x%x expected:0x%x no support of in-place error update\n", + HWFNC_ERR("invalid flags:0x%x expected:0x%lx no support of in-place error update\n", update_flags, MSM_HW_FENCE_UPDATE_ERROR_WITH_MOVE); return -EINVAL; } @@ -623,7 +623,7 @@ int msm_hw_fence_dump_fence(void *client_handle, struct dma_fence *fence) HWFNC_ERR("Invalid client handle:%d\n", IS_ERR_OR_NULL(client_handle)); return -EINVAL; } else if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) { - HWFNC_ERR("DMA Fence is not a HW Fence ctx:%llu seqno:%llu flags:0x%llx\n", + HWFNC_ERR("DMA Fence is not a HW Fence ctx:%llu seqno:%llu flags:0x%lx\n", 
fence->context, fence->seqno, fence->flags); return -EINVAL; } diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c index 6970eb4d60..50b7e382b4 100644 --- a/hw_fence/src/msm_hw_fence_synx_translation.c +++ b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -124,7 +124,7 @@ struct synx_session *synx_hwfence_initialize(struct synx_initialization_params * (struct msm_hw_fence_mem_addr *)params->ptr); if (IS_ERR_OR_NULL(client_handle)) { kfree(session); - HWFNC_ERR("failed to initialize synx_id:%d ret:%d\n", params->id, + HWFNC_ERR("failed to initialize synx_id:%d ret:%ld\n", params->id, PTR_ERR(client_handle)); return ERR_PTR(to_synx_status(PTR_ERR(client_handle))); } From bf9cf7bc100db66539ef27a7d10f14a29ec55437 Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 4 Oct 2023 21:16:21 -0700 Subject: [PATCH 082/166] mm-drivers: sync_fence: resolve compilation failures for sync fences Resolve compilation issues in sync fence module. Change-Id: If619e5cdf443806cc49a001bcad42d625220725d Signed-off-by: Grace An --- sync_fence/src/qcom_sync_file.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/sync_fence/src/qcom_sync_file.c b/sync_fence/src/qcom_sync_file.c index 04d8951233..8bc4fea268 100644 --- a/sync_fence/src/qcom_sync_file.c +++ b/sync_fence/src/qcom_sync_file.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
*/ @@ -21,6 +21,7 @@ #include #include #include +#include #define CLASS_NAME "sync" #define DRV_NAME "spec_sync" @@ -378,7 +379,7 @@ static int spec_sync_bind_array(struct fence_bind_data *sync_bind_info) for (i = 0; i < num_fences; i++) { if (!(fence_array->fences[i]->context == DUMMY_CONTEXT && fence_array->fences[i]->seqno == DUMMY_SEQNO)) { - pr_err("fence array already populated, spec fd:%d status:%d flags:0x%x\n", + pr_err("fence array already populated, spec fd:%d status:%d flags:0x%lx\n", sync_bind_info->out_bind_fd, dma_fence_get_status(fence), fence->flags); ret = -EINVAL; @@ -491,7 +492,11 @@ static int spec_sync_register_device(void) struct dummy_spec_fence *dummy_fence_p = NULL; int ret; +#if (KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE) + sync_dev.dev_class = class_create(CLASS_NAME); +#else sync_dev.dev_class = class_create(THIS_MODULE, CLASS_NAME); +#endif if (sync_dev.dev_class == NULL) { pr_err("%s: class_create fail.\n", __func__); goto res_err; From 81c92ac5a0a1ec60d8a28afba333885781bedce0 Mon Sep 17 00:00:00 2001 From: Grace An Date: Thu, 12 Oct 2023 10:42:44 -0700 Subject: [PATCH 083/166] mm-drivers: hw_fence: rate limit probe failure messages This change ensures that error messages are only printed for probe failure once and not on subsequent probe failures. Change-Id: I6f1d893c118dd7d03170b8c4a8984d72924b09b3 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_debug.h | 4 ++++ hw_fence/src/hw_fence_drv_utils.c | 2 +- hw_fence/src/msm_hw_fence.c | 6 +++--- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_debug.h b/hw_fence/include/hw_fence_drv_debug.h index b6f6f14e19..4f754b7c28 100644 --- a/hw_fence/include/hw_fence_drv_debug.h +++ b/hw_fence/include/hw_fence_drv_debug.h @@ -36,6 +36,10 @@ extern u32 msm_hw_fence_debug_level; pr_err("[hwfence:%s:%d][err][%pS] "fmt, __func__, __LINE__, \ __builtin_return_address(0), ##__VA_ARGS__) +#define HWFNC_ERR_ONCE(fmt, ...) 
\ + pr_err_once("[hwfence:%s:%d][err][%pS] "fmt, __func__, __LINE__, \ + __builtin_return_address(0), ##__VA_ARGS__) + #define HWFNC_DBG_H(fmt, ...) \ dprintk(HW_FENCE_HIGH, "[hwfence:%s:%d][dbgh]"fmt, __func__, __LINE__, ##__VA_ARGS__) diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index f2b34c1c4f..b7c642fedd 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -608,7 +608,7 @@ int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *drv_data) HWFNC_DBG_INIT("notifier: ret:%d peer_name:%d notifier_ret:%d\n", ret, drv_data->peer_name, notifier_ret); if (notifier_ret) { - HWFNC_ERR("fail to register notifier ret:%d\n", notifier_ret); + HWFNC_ERR_ONCE("fail to register notifier ret:%d\n", notifier_ret); return -EPROBE_DEFER; } diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 5ec9286eb5..8b22d72d85 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -687,7 +687,7 @@ static int msm_hw_fence_probe_init(struct platform_device *pdev) /* Allocate hw fence driver mem pool and share it with HYP */ rc = hw_fence_utils_alloc_mem(hw_fence_drv_data); if (rc) { - HWFNC_ERR("failed to alloc base memory\n"); + HWFNC_ERR_ONCE("failed to alloc base memory\n"); goto error; } @@ -703,7 +703,7 @@ error: kfree(hw_fence_drv_data); hw_fence_drv_data = (void *) -EPROBE_DEFER; - HWFNC_ERR("error %d\n", rc); + HWFNC_ERR_ONCE("error %d\n", rc); return rc; } @@ -728,7 +728,7 @@ static int msm_hw_fence_probe(struct platform_device *pdev) return 0; err_exit: - HWFNC_ERR("error %d\n", rc); + HWFNC_ERR_ONCE("error %d\n", rc); return rc; } From 12350e5f0e537b8d2d423f3d3e3cf9b7edaec55a Mon Sep 17 00:00:00 2001 From: Grace An Date: Mon, 16 Oct 2023 17:16:32 -0700 Subject: [PATCH 084/166] mm-drivers: add sync_fence and msm_ext_display headers Add snapshot of sync_fence and msm_ext_display headers as of qcom-6.1 commit fd8ea7d09a95 ("firmware: qcom_scm: change the interrupt 
handler behavior"). Change-Id: Ide04b34d0c4579d97d607c1ee492c89a40392963 Signed-off-by: Grace An --- msm_ext_display/include/msm_ext_display.h | 240 ++++++++++++++++++++++ sync_fence/include/qcom_sync_file.h | 39 ++++ 2 files changed, 279 insertions(+) create mode 100644 msm_ext_display/include/msm_ext_display.h create mode 100644 sync_fence/include/qcom_sync_file.h diff --git a/msm_ext_display/include/msm_ext_display.h b/msm_ext_display/include/msm_ext_display.h new file mode 100644 index 0000000000..f6a8d10a3d --- /dev/null +++ b/msm_ext_display/include/msm_ext_display.h @@ -0,0 +1,240 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _MSM_EXT_DISPLAY_H_ +#define _MSM_EXT_DISPLAY_H_ + +#include +#include +#include + +#define AUDIO_ACK_SET_ENABLE BIT(5) +#define AUDIO_ACK_ENABLE BIT(4) +#define AUDIO_ACK_CONNECT BIT(0) + +#define MSM_EXT_DISP_MAX_CODECS 2 + +/* + * Flags to be used with the HPD operation of the external display + * interface: + * MSM_EXT_DISP_HPD_AUDIO: audio will be routed to external display + * MSM_EXT_DISP_HPD_VIDEO: video will be routed to external display + */ +#define MSM_EXT_DISP_HPD_AUDIO BIT(0) +#define MSM_EXT_DISP_HPD_VIDEO BIT(1) + +/** + * struct ext_disp_cable_notify - cable notify handler structure + * @link: a link for the linked list + * @status: current status of HDMI/DP cable connection + * @hpd_notify: callback function to provide cable status + */ +struct ext_disp_cable_notify { + struct list_head link; + int status; + void (*hpd_notify)(struct ext_disp_cable_notify *h); +}; + +struct msm_ext_disp_audio_edid_blk { + u8 *audio_data_blk; + unsigned int audio_data_blk_size; /* in bytes */ + u8 *spk_alloc_data_blk; + unsigned int spk_alloc_data_blk_size; /* in bytes */ +}; + +struct msm_ext_disp_audio_setup_params { + u32 sample_rate_hz; + u32 num_of_channels; + u32 
channel_allocation; + u32 level_shift; + bool down_mix; + u32 sample_present; +}; + +/* + * External Display identifier for use to determine which interface + * the audio driver is interacting with. + */ +enum msm_ext_disp_type { + EXT_DISPLAY_TYPE_HDMI = EXTCON_DISP_HDMI, + EXT_DISPLAY_TYPE_DP = EXTCON_DISP_DP, + EXT_DISPLAY_TYPE_MAX = 0xFFFFFFFF +}; + +/* + * External Display cable state used by display interface to indicate + * connect/disconnect of interface. + */ +enum msm_ext_disp_cable_state { + EXT_DISPLAY_CABLE_DISCONNECT, + EXT_DISPLAY_CABLE_CONNECT, + EXT_DISPLAY_CABLE_STATE_MAX +}; + +/** + * External Display power state used by display interface to indicate + * power on/off of the interface. + */ +enum msm_ext_disp_power_state { + EXT_DISPLAY_POWER_OFF, + EXT_DISPLAY_POWER_ON, + EXT_DISPLAY_POWER_MAX +}; + +/** + * struct msm_ext_disp_codec_id - codec information + * @type: external display type + * @ctrl_id: controller id + * @stream_id: stream_id + */ +struct msm_ext_disp_codec_id { + enum msm_ext_disp_type type; + int ctrl_id; + int stream_id; +}; + +/** + * struct msm_ext_disp_intf_ops - operations exposed to display interface + * @audio_config: configures the audio operations exposed to codec driver + * @audio_notify: notifies the audio connection state to user modules. + * @video_notify: notifies the video connection state to user modules. 
+ */ +struct msm_ext_disp_intf_ops { + int (*audio_config)(struct platform_device *pdev, + struct msm_ext_disp_codec_id *codec, + enum msm_ext_disp_cable_state state); + + int (*audio_notify)(struct platform_device *pdev, + struct msm_ext_disp_codec_id *codec, + enum msm_ext_disp_cable_state state); + + int (*video_notify)(struct platform_device *pdev, + struct msm_ext_disp_codec_id *codec, + enum msm_ext_disp_cable_state state); +}; + +/** + * struct msm_ext_disp_audio_codec_ops - operations exposed to audio codec + * @audio_info_setup: configure audio on interface + * @get_audio_edid_blk: retrieve audio edid block + * @cable_status: cable connected/disconnected + * @get_intf_id: id of connected interface + * @teardown_done: audio session teardown done by qdsp + * @acknowledge: acknowledge audio status received by user modules + * @ready: notify audio when codec driver is ready. + */ +struct msm_ext_disp_audio_codec_ops { + int (*audio_info_setup)(struct platform_device *pdev, + struct msm_ext_disp_audio_setup_params *params); + int (*get_audio_edid_blk)(struct platform_device *pdev, + struct msm_ext_disp_audio_edid_blk *blk); + int (*cable_status)(struct platform_device *pdev, u32 vote); + int (*get_intf_id)(struct platform_device *pdev); + void (*teardown_done)(struct platform_device *pdev); + int (*acknowledge)(struct platform_device *pdev, u32 ack); + int (*ready)(struct platform_device *pdev); +}; + +/** + * struct msm_ext_disp_init_data - data needed to register a display interface + * @type: external display type + * @intf_ops: external display interface operations + * @codec_ops: audio codec operations + * @pdev: platform device instance of the interface driver + * @intf_data: interface specific data + */ +struct msm_ext_disp_init_data { + struct msm_ext_disp_codec_id codec; + struct msm_ext_disp_intf_ops intf_ops; + struct msm_ext_disp_audio_codec_ops codec_ops; + struct platform_device *pdev; + void *intf_data; +}; + +/** + * struct msm_ext_disp_data - 
data needed by interface modules + * @intf_pdev: platform device instance of the interface + * @intf_data: data related to interface module + */ +struct msm_ext_disp_data { + struct platform_device *intf_pdev; + void *intf_data; +}; + +#if IS_ENABLED(CONFIG_MSM_EXT_DISPLAY) +/** + * msm_ext_disp_register_audio_codec() - audio codec registration + * @pdev: platform device pointer + * @codec_ops: audio codec operations + */ +int msm_ext_disp_register_audio_codec(struct platform_device *pdev, + struct msm_ext_disp_audio_codec_ops *ops); + +/** + * msm_ext_disp_select_audio_codec() - select audio codec + * @pdev: platform device pointer + * @codec: codec id information + */ +int msm_ext_disp_select_audio_codec(struct platform_device *pdev, + struct msm_ext_disp_codec_id *codec); + +/** + * msm_hdmi_register_audio_codec() - wrapper for hdmi audio codec + * registration + * @pdev: platform device pointer + * @codec_ops: audio codec operations + */ +int msm_hdmi_register_audio_codec(struct platform_device *pdev, + struct msm_ext_disp_audio_codec_ops *ops); + +/** + * msm_ext_disp_register_intf() - display interface registration + * @init_data: data needed to register the display interface + */ +int msm_ext_disp_register_intf(struct platform_device *pdev, + struct msm_ext_disp_init_data *init_data); + +/** + * msm_ext_disp_deregister_intf() - display interface deregistration + * @init_data: data needed to deregister the display interface + */ +int msm_ext_disp_deregister_intf(struct platform_device *pdev, + struct msm_ext_disp_init_data *init_data); + +#else +static inline int msm_ext_disp_register_audio_codec( + struct platform_device *pdev, + struct msm_ext_disp_audio_codec_ops *ops) +{ + return 0; +} + +static inline int msm_ext_disp_select_audio_codec(struct platform_device *pdev, + struct msm_ext_disp_codec_id *codec) +{ + return 0; +} + +static inline int msm_hdmi_register_audio_codec(struct platform_device *pdev, + struct msm_ext_disp_audio_codec_ops *ops) +{ + 
return 0; +} + +static inline int msm_ext_disp_register_intf(struct platform_device *pdev, + struct msm_ext_disp_init_data *init_data) +{ + return 0; +} + +static inline int msm_ext_disp_deregister_intf(struct platform_device *pdev, + struct msm_ext_disp_init_data *init_data) +{ + return 0; +} +#endif + +#endif /*_MSM_EXT_DISPLAY_H_*/ diff --git a/sync_fence/include/qcom_sync_file.h b/sync_fence/include/qcom_sync_file.h new file mode 100644 index 0000000000..5e6e541865 --- /dev/null +++ b/sync_fence/include/qcom_sync_file.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _LINUX_QCOM_SPEC_SYNC_H +#define _LINUX_QCOM_SPEC_SYNC_H + +#include + +#define SPEC_FENCE_FLAG_FENCE_ARRAY 16 /* fence-array is speculative */ +#define SPEC_FENCE_FLAG_FENCE_ARRAY_BOUND 17 /* fence-array is bound */ + +#if IS_ENABLED(CONFIG_QCOM_SPEC_SYNC) + +/** + * spec_sync_wait_bind_array() - Waits until the fence-array passed as parameter is bound. + * @fence_array: fence-array to wait-on until it is populated. + * @timeout_ms: timeout to wait. + * + * This function will wait until the fence-array passed as paremeter is bound; i.e. all the + * dma-fences that conform the fence-array are populated by the spec-fence driver bind ioctl. + * Once this function returns success, all the fences in the array should be valid. 
+ * + * Return: 0 on success or negative errno (-EINVAL) + */ +int spec_sync_wait_bind_array(struct dma_fence_array *fence_array, u32 timeout_ms); + +#else + +static inline int spec_sync_wait_bind_array(struct dma_fence_array *fence_array, u32 timeout_ms) +{ + return -EINVAL; +} + +#endif /* CONFIG_QCOM_SPEC_SYNC */ + +#endif /* _LINUX_QCOM_SPEC_SYNC_H */ From 9833afb3d56b4bfe5e77a09eae1014ed44c3433f Mon Sep 17 00:00:00 2001 From: Grace An Date: Mon, 16 Oct 2023 17:17:34 -0700 Subject: [PATCH 085/166] mm-drivers: fix sync_fence and msm_ext_display header paths Modify sync_fence and msm_ext_display headers to compile from Display SI. Change-Id: I3d69c883d7f37ee92b265f324c738774347ba6ab Signed-off-by: Grace An --- BUILD.bazel | 2 ++ msm_ext_display/BUILD.bazel | 6 ++++++ msm_ext_display/src/msm_ext_display.c | 4 ++-- sync_fence/BUILD.bazel | 6 ++++++ sync_fence/src/qcom_sync_file.c | 2 +- 5 files changed, 17 insertions(+), 3 deletions(-) diff --git a/BUILD.bazel b/BUILD.bazel index 5f4185bcfe..fb6cad061f 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -18,5 +18,7 @@ ddk_headers( ":mm_drivers_configs", "//vendor/qcom/opensource/mm-drivers/hw_fence:hw_fence_headers", "//vendor/qcom/opensource/mm-drivers/sync_fence:sync_fence_uapi_headers", + "//vendor/qcom/opensource/mm-drivers/msm_ext_display:msm_ext_display_headers", + "//vendor/qcom/opensource/mm-drivers/sync_fence:sync_fence_headers", ], ) diff --git a/msm_ext_display/BUILD.bazel b/msm_ext_display/BUILD.bazel index 5f30f80c12..0939b45466 100644 --- a/msm_ext_display/BUILD.bazel +++ b/msm_ext_display/BUILD.bazel @@ -7,4 +7,10 @@ package( ], ) +ddk_headers( + name = "msm_ext_display_headers", + hdrs = glob(["include/*.h"]), + includes = ["include"] +) + define_msm_ext_display() diff --git a/msm_ext_display/src/msm_ext_display.c b/msm_ext_display/src/msm_ext_display.c index 57da7fe2ee..59ea16f43c 100644 --- a/msm_ext_display/src/msm_ext_display.c +++ b/msm_ext_display/src/msm_ext_display.c @@ -1,6 +1,6 @@ // 
SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */ @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include struct msm_ext_disp_list { diff --git a/sync_fence/BUILD.bazel b/sync_fence/BUILD.bazel index 8da9507b61..ac0a95d918 100644 --- a/sync_fence/BUILD.bazel +++ b/sync_fence/BUILD.bazel @@ -13,4 +13,10 @@ ddk_headers( includes = ["include"] ) +ddk_headers( + name = "sync_fence_headers", + hdrs = glob(["include/*.h"]), + includes = ["include"] +) + define_sync_fence() diff --git a/sync_fence/src/qcom_sync_file.c b/sync_fence/src/qcom_sync_file.c index 8bc4fea268..7c2165d6e4 100644 --- a/sync_fence/src/qcom_sync_file.c +++ b/sync_fence/src/qcom_sync_file.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #define CLASS_NAME "sync" From 4e31d33a03a4e18f846c140e63c86d92a1b7294d Mon Sep 17 00:00:00 2001 From: Veera Sundaram Sankaran Date: Wed, 23 Aug 2023 11:20:12 -0700 Subject: [PATCH 086/166] mm-drivers: switch from EXPORT_SYMBOL to EXPORT_SYMBOL_GPL Use EXPORT_SYMBOL_GPL instead of EXPORT_SYMBOL in downstream driver as the usage is approved now. 
Change-Id: Ifbe39b4b8dda8a3942fcc9a55afd624426a4eaed Signed-off-by: Narendra Muppalla Signed-off-by: Veera Sundaram Sankaran --- hw_fence/src/msm_hw_fence.c | 34 ++++++++++---------- hw_fence/src/msm_hw_fence_synx_translation.c | 14 ++++---- msm_ext_display/src/msm_ext_display.c | 10 +++--- sync_fence/src/qcom_sync_file.c | 4 +-- 4 files changed, 31 insertions(+), 31 deletions(-) diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 5ec9286eb5..2bd31c89d7 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -138,7 +138,7 @@ error: HWFNC_ERR("failed with error:%d\n", ret); return ERR_PTR(ret); } -EXPORT_SYMBOL(msm_hw_fence_register); +EXPORT_SYMBOL_GPL(msm_hw_fence_register); int msm_hw_fence_deregister(void *client_handle) { @@ -164,7 +164,7 @@ int msm_hw_fence_deregister(void *client_handle) return 0; } -EXPORT_SYMBOL(msm_hw_fence_deregister); +EXPORT_SYMBOL_GPL(msm_hw_fence_deregister); int msm_hw_fence_create(void *client_handle, struct msm_hw_fence_create_params *params) @@ -217,7 +217,7 @@ int msm_hw_fence_create(void *client_handle, return 0; } -EXPORT_SYMBOL(msm_hw_fence_create); +EXPORT_SYMBOL_GPL(msm_hw_fence_create); int msm_hw_fence_destroy(void *client_handle, struct dma_fence *fence) @@ -262,7 +262,7 @@ int msm_hw_fence_destroy(void *client_handle, return 0; } -EXPORT_SYMBOL(msm_hw_fence_destroy); +EXPORT_SYMBOL_GPL(msm_hw_fence_destroy); int msm_hw_fence_destroy_with_handle(void *client_handle, u64 handle) { @@ -294,7 +294,7 @@ int msm_hw_fence_destroy_with_handle(void *client_handle, u64 handle) return 0; } -EXPORT_SYMBOL(msm_hw_fence_destroy_with_handle); +EXPORT_SYMBOL_GPL(msm_hw_fence_destroy_with_handle); int msm_hw_fence_wait_update_v2(void *client_handle, struct dma_fence **fence_list, u64 *handles, u64 *client_data_list, u32 num_fences, @@ -366,7 +366,7 @@ int msm_hw_fence_wait_update_v2(void *client_handle, return 0; } -EXPORT_SYMBOL(msm_hw_fence_wait_update_v2); 
+EXPORT_SYMBOL_GPL(msm_hw_fence_wait_update_v2); int msm_hw_fence_wait_update(void *client_handle, struct dma_fence **fence_list, u32 num_fences, bool create) @@ -374,7 +374,7 @@ int msm_hw_fence_wait_update(void *client_handle, return msm_hw_fence_wait_update_v2(client_handle, fence_list, NULL, NULL, num_fences, create); } -EXPORT_SYMBOL(msm_hw_fence_wait_update); +EXPORT_SYMBOL_GPL(msm_hw_fence_wait_update); int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags) { @@ -404,7 +404,7 @@ int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags) return 0; } -EXPORT_SYMBOL(msm_hw_fence_reset_client); +EXPORT_SYMBOL_GPL(msm_hw_fence_reset_client); int msm_hw_fence_reset_client_by_id(enum hw_fence_client_id client_id_ext, u32 reset_flags) { @@ -425,7 +425,7 @@ int msm_hw_fence_reset_client_by_id(enum hw_fence_client_id client_id_ext, u32 r return msm_hw_fence_reset_client(hw_fence_drv_data->clients[client_id], reset_flags); } -EXPORT_SYMBOL(msm_hw_fence_reset_client_by_id); +EXPORT_SYMBOL_GPL(msm_hw_fence_reset_client_by_id); int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 error) { @@ -451,7 +451,7 @@ int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 erro return 0; } -EXPORT_SYMBOL(msm_hw_fence_update_txq); +EXPORT_SYMBOL_GPL(msm_hw_fence_update_txq); int msm_hw_fence_update_txq_error(void *client_handle, u64 handle, u32 error, u32 update_flags) @@ -480,7 +480,7 @@ int msm_hw_fence_update_txq_error(void *client_handle, u64 handle, u32 error, u3 return 0; } -EXPORT_SYMBOL(msm_hw_fence_update_txq_error); +EXPORT_SYMBOL_GPL(msm_hw_fence_update_txq_error); /* tx client has to be the physical, rx client virtual id*/ int msm_hw_fence_trigger_signal(void *client_handle, @@ -505,7 +505,7 @@ int msm_hw_fence_trigger_signal(void *client_handle, return 0; } -EXPORT_SYMBOL(msm_hw_fence_trigger_signal); +EXPORT_SYMBOL_GPL(msm_hw_fence_trigger_signal); int msm_hw_fence_register_error_cb(void *client_handle, 
msm_hw_fence_error_cb_t cb, void *data) { @@ -533,7 +533,7 @@ int msm_hw_fence_register_error_cb(void *client_handle, msm_hw_fence_error_cb_t return 0; } -EXPORT_SYMBOL(msm_hw_fence_register_error_cb); +EXPORT_SYMBOL_GPL(msm_hw_fence_register_error_cb); int msm_hw_fence_deregister_error_cb(void *client_handle) { @@ -572,7 +572,7 @@ exit: return 0; } -EXPORT_SYMBOL(msm_hw_fence_deregister_error_cb); +EXPORT_SYMBOL_GPL(msm_hw_fence_deregister_error_cb); #if IS_ENABLED(CONFIG_DEBUG_FS) int msm_hw_fence_dump_debug_data(void *client_handle, u32 dump_flags, u32 dump_clients_mask) @@ -608,7 +608,7 @@ int msm_hw_fence_dump_debug_data(void *client_handle, u32 dump_flags, u32 dump_c return 0; } -EXPORT_SYMBOL(msm_hw_fence_dump_debug_data); +EXPORT_SYMBOL_GPL(msm_hw_fence_dump_debug_data); int msm_hw_fence_dump_fence(void *client_handle, struct dma_fence *fence) { @@ -640,7 +640,7 @@ int msm_hw_fence_dump_fence(void *client_handle, struct dma_fence *fence) return 0; } -EXPORT_SYMBOL(msm_hw_fence_dump_fence); +EXPORT_SYMBOL_GPL(msm_hw_fence_dump_fence); #endif /* CONFIG_DEBUG_FS */ /* Function used for simulation purposes only. 
*/ @@ -658,7 +658,7 @@ int msm_hw_fence_driver_doorbell_sim(u64 db_mask) return 0; } -EXPORT_SYMBOL(msm_hw_fence_driver_doorbell_sim); +EXPORT_SYMBOL_GPL(msm_hw_fence_driver_doorbell_sim); static int msm_hw_fence_probe_init(struct platform_device *pdev) { diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c index 6970eb4d60..195a6c524c 100644 --- a/hw_fence/src/msm_hw_fence_synx_translation.c +++ b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -134,7 +134,7 @@ struct synx_session *synx_hwfence_initialize(struct synx_initialization_params * return session; } -EXPORT_SYMBOL(synx_hwfence_initialize); +EXPORT_SYMBOL_GPL(synx_hwfence_initialize); int synx_hwfence_uninitialize(struct synx_session *session) { @@ -154,7 +154,7 @@ int synx_hwfence_uninitialize(struct synx_session *session) return to_synx_status(ret); } -EXPORT_SYMBOL(synx_hwfence_uninitialize); +EXPORT_SYMBOL_GPL(synx_hwfence_uninitialize); int synx_hwfence_create(struct synx_session *session, struct synx_create_params *params) { @@ -196,7 +196,7 @@ int synx_hwfence_create(struct synx_session *session, struct synx_create_params return SYNX_SUCCESS; } -EXPORT_SYMBOL(synx_hwfence_create); +EXPORT_SYMBOL_GPL(synx_hwfence_create); int synx_hwfence_release(struct synx_session *session, u32 h_synx) { @@ -215,7 +215,7 @@ int synx_hwfence_release(struct synx_session *session, u32 h_synx) return to_synx_status(ret); } -EXPORT_SYMBOL(synx_hwfence_release); +EXPORT_SYMBOL_GPL(synx_hwfence_release); int synx_hwfence_signal(struct synx_session *session, u32 h_synx, enum synx_signal_status status) { @@ -234,7 +234,7 @@ int synx_hwfence_signal(struct synx_session *session, u32 h_synx, enum synx_sign return to_synx_status(ret); } -EXPORT_SYMBOL(synx_hwfence_signal); +EXPORT_SYMBOL_GPL(synx_hwfence_signal); int synx_hwfence_recover(enum synx_client_id id) { @@ -252,7 +252,7 @@ int synx_hwfence_recover(enum synx_client_id id) return to_synx_status(ret); } 
-EXPORT_SYMBOL(synx_hwfence_recover); +EXPORT_SYMBOL_GPL(synx_hwfence_recover); static int synx_hwfence_import_indv(void *client, struct synx_import_indv_params *params) { @@ -332,4 +332,4 @@ int synx_hwfence_import(struct synx_session *session, struct synx_import_params return ret; } -EXPORT_SYMBOL(synx_hwfence_import); +EXPORT_SYMBOL_GPL(synx_hwfence_import); diff --git a/msm_ext_display/src/msm_ext_display.c b/msm_ext_display/src/msm_ext_display.c index 57da7fe2ee..3894fc118c 100644 --- a/msm_ext_display/src/msm_ext_display.c +++ b/msm_ext_display/src/msm_ext_display.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */ @@ -407,7 +407,7 @@ end: return ret; } -EXPORT_SYMBOL(msm_ext_disp_register_audio_codec); +EXPORT_SYMBOL_GPL(msm_ext_disp_register_audio_codec); int msm_ext_disp_select_audio_codec(struct platform_device *pdev, struct msm_ext_disp_codec_id *codec) @@ -447,7 +447,7 @@ end: return ret; } -EXPORT_SYMBOL(msm_ext_disp_select_audio_codec); +EXPORT_SYMBOL_GPL(msm_ext_disp_select_audio_codec); static int msm_ext_disp_validate_intf(struct msm_ext_disp_init_data *init_data) { @@ -538,7 +538,7 @@ end: mutex_unlock(&ext_disp->lock); return ret; } -EXPORT_SYMBOL(msm_ext_disp_register_intf); +EXPORT_SYMBOL_GPL(msm_ext_disp_register_intf); int msm_ext_disp_deregister_intf(struct platform_device *pdev, struct msm_ext_disp_init_data *init_data) @@ -577,7 +577,7 @@ end: return ret; } -EXPORT_SYMBOL(msm_ext_disp_deregister_intf); +EXPORT_SYMBOL_GPL(msm_ext_disp_deregister_intf); static int msm_ext_disp_probe(struct platform_device *pdev) { diff --git a/sync_fence/src/qcom_sync_file.c b/sync_fence/src/qcom_sync_file.c index 04d8951233..7fc9513cdc 100644 --- a/sync_fence/src/qcom_sync_file.c +++ b/sync_fence/src/qcom_sync_file.c @@ 
-1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) 2021, The Linux Foundation. All rights reserved. */ @@ -342,7 +342,7 @@ int spec_sync_wait_bind_array(struct dma_fence_array *fence_array, u32 timeout_m return ret; } -EXPORT_SYMBOL(spec_sync_wait_bind_array); +EXPORT_SYMBOL_GPL(spec_sync_wait_bind_array); static int spec_sync_bind_array(struct fence_bind_data *sync_bind_info) { From 0e41afe42769b9772e95fbc991bc429a5b4e2a51 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Thu, 17 Aug 2023 11:56:50 -0700 Subject: [PATCH 087/166] mm-drivers: hw_fence: add snapshot of hw-fence driver header in Display SI Add snapshot of the hw fence driver header as of qcom-6.4 commit 76b6fe6f907d ("defconfig: Enable PM8008 regulator driver for BLAIR") into Display SI. Change-Id: Id57863a2ecbb043ae953adbda1b55630872e2b8f Signed-off-by: Ingrid Gallardo --- hw_fence/include/msm_hw_fence.h | 641 ++++++++++++++++++++++++++++++++ 1 file changed, 641 insertions(+) create mode 100644 hw_fence/include/msm_hw_fence.h diff --git a/hw_fence/include/msm_hw_fence.h b/hw_fence/include/msm_hw_fence.h new file mode 100644 index 0000000000..62c0f3ba85 --- /dev/null +++ b/hw_fence/include/msm_hw_fence.h @@ -0,0 +1,641 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __MSM_HW_FENCE_H +#define __MSM_HW_FENCE_H + +#include +#include + +/** + * MSM_HW_FENCE_FLAG_ENABLED_BIT - Hw-fence is enabled for the dma_fence. + * + * Drivers set this flag in the dma_fence 'flags' to fences that + * are backed up by a hw-fence. + */ +#define MSM_HW_FENCE_FLAG_ENABLED_BIT 31 + +/** + * MSM_HW_FENCE_FLAG_SIGNALED_BIT - Hw-fence is signaled for the dma_fence. 
+ * + * This flag is set by hw-fence driver when a client wants to add itself as + * a waiter for this hw-fence. The client uses this flag to avoid adding itself + * as a waiter for a fence that is already retired. + */ +#define MSM_HW_FENCE_FLAG_SIGNALED_BIT 30 + +/** + * MSM_HW_FENCE_ERROR_RESET - Hw-fence flagged as error due to forced reset from producer. + */ +#define MSM_HW_FENCE_ERROR_RESET BIT(0) + +/** + * MSM_HW_FENCE_RESET_WITHOUT_ERROR: Resets client and its hw-fences, signaling them without error. + * MSM_HW_FENCE_RESET_WITHOUT_DESTROY: Resets client and its hw-fences, signaling without + * destroying the fences. + */ +#define MSM_HW_FENCE_RESET_WITHOUT_ERROR BIT(0) +#define MSM_HW_FENCE_RESET_WITHOUT_DESTROY BIT(1) + +/** + * MSM_HW_FENCE_UPDATE_ERROR_WITH_MOVE: Updates client tx queue error by moving fence with error to + * beginning of queue. + */ +#define MSM_HW_FENCE_UPDATE_ERROR_WITH_MOVE BIT(0) + +/** + * MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - Maximum number of signals per client + */ +#define MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT 64 + +/** + * MSM_HW_FENCE_DBG_DUMP_QUEUES: Dumps queues information + * MSM_HW_FENCE_DBG_DUMP_TABLE: Dumps hwfence table + * MSM_HW_FENCE_DBG_DUMP_EVENTS: Dumps hwfence ctl events + */ +#define MSM_HW_FENCE_DBG_DUMP_QUEUES BIT(0) +#define MSM_HW_FENCE_DBG_DUMP_TABLE BIT(1) +#define MSM_HW_FENCE_DBG_DUMP_EVENTS BIT(2) + +/** + * struct msm_hw_fence_create_params - Creation parameters. + * + * @name : Optional parameter associating a name with the object for debug purposes. + * Only first 64 bytes are accepted, rest will be ignored. + * @handle : Pointer to fence handle (filled by function). + * @fence : Pointer to fence. + * @flags : flags for customization. + */ +struct msm_hw_fence_create_params { + const char *name; + u64 *handle; + void *fence; + u32 flags; +}; + +/** + * struct msm_hw_fence_hfi_queue_table_header - HFI queue table structure. + * @version: HFI protocol version. + * @size: Queue table size in dwords. 
+ * @qhdr0_offset: First queue header offset (dwords) in this table. + * @qhdr_size: Queue header size. + * @num_q: Number of queues defined in this table. + * @num_active_q: Number of active queues. + */ +struct msm_hw_fence_hfi_queue_table_header { + u32 version; + u32 size; + u32 qhdr0_offset; + u32 qhdr_size; + u32 num_q; + u32 num_active_q; +}; + +/** + * struct msm_hw_fence_hfi_queue_header - HFI queue header structure. + * @status: Active = 1, Inactive = 0. + * @start_addr: Starting address of the queue. + * @type: Queue type (rx/tx). + * @queue_size: Size of the queue. + * @pkt_size: Size of the queue packet entries, + * 0 - means variable size of message in the queue, + * non-zero - size of the packet, fixed. + * @pkt_drop_cnt: Number of packets drop by sender. + * @rx_wm: Receiver watermark, applicable in event driven mode. + * @tx_wm: Sender watermark, applicable in event driven mode. + * @rx_req: Receiver sets this bit if queue is empty. + * @tx_req: Sender sets this bit if queue is full. + * @rx_irq_status: Receiver sets this bit and triggers an interrupt to the + * sender after packets are dequeued. Sender clears this bit. + * @tx_irq_status: Sender sets this bit and triggers an interrupt to the + * receiver after packets are queued. Receiver clears this bit. + * @read_index: read index of the queue. + * @write_index: write index of the queue. + */ +struct msm_hw_fence_hfi_queue_header { + u32 status; + u32 start_addr; + u32 type; + u32 queue_size; + u32 pkt_size; + u32 pkt_drop_cnt; + u32 rx_wm; + u32 tx_wm; + u32 rx_req; + u32 tx_req; + u32 rx_irq_status; + u32 tx_irq_status; + u32 read_index; + u32 write_index; +}; + +/** + * struct msm_hw_fence_mem_addr - Memory descriptor of the queue allocated by + * the fence driver for each client during + * register. + * @virtual_addr: Kernel virtual address of the queue. + * @device_addr: Physical address of the memory object. + * @size: Size of the memory. 
+ * @mem_data: Internal pointer with the attributes of the allocation. + */ +struct msm_hw_fence_mem_addr { + void *virtual_addr; + phys_addr_t device_addr; + u64 size; + void *mem_data; +}; + +/** + * struct msm_hw_fence_cb_data - Data passed back in fence error callback. + * @data: data registered with callback + * @fence: fence signaled with error + */ +struct msm_hw_fence_cb_data { + void *data; + struct dma_fence *fence; +}; + +/** + * msm_hw_fence_error_cb: Callback function registered by waiting clients. + * Dispatched when client is waiting on a fence + * signaled with error. + * + * @handle: handle of fence signaled with error + * @error: error signed for fence + * @cb_data: pointer to struct containing opaque pointer registered with callback + * and fence information + */ +typedef void (*msm_hw_fence_error_cb_t)(u32 handle, int error, void *cb_data); + +/** + * enum hw_fence_client_id - Unique identifier of the supported clients. + * @HW_FENCE_CLIENT_ID_CTX0: GFX Client. + * @HW_FENCE_CLIENT_ID_CTL0: DPU Client 0. + * @HW_FENCE_CLIENT_ID_CTL1: DPU Client 1. + * @HW_FENCE_CLIENT_ID_CTL2: DPU Client 2. + * @HW_FENCE_CLIENT_ID_CTL3: DPU Client 3. + * @HW_FENCE_CLIENT_ID_CTL4: DPU Client 4. + * @HW_FENCE_CLIENT_ID_CTL5: DPU Client 5. + * @HW_FENCE_CLIENT_ID_VAL0: debug Validation client 0. + * @HW_FENCE_CLIENT_ID_VAL1: debug Validation client 1. + * @HW_FENCE_CLIENT_ID_VAL2: debug Validation client 2. + * @HW_FENCE_CLIENT_ID_VAL3: debug Validation client 3. + * @HW_FENCE_CLIENT_ID_VAL4: debug Validation client 4. + * @HW_FENCE_CLIENT_ID_VAL5: debug Validation client 5. + * @HW_FENCE_CLIENT_ID_VAL6: debug Validation client 6. + * @HW_FENCE_CLIENT_ID_IPE: IPE Client. + * @HW_FENCE_CLIENT_ID_VPU: VPU Client. + * @HW_FENCE_CLIENT_ID_IFE0: IFE0 Client 0. + * @HW_FENCE_CLIENT_ID_IFE1: IFE1 Client 0. + * @HW_FENCE_CLIENT_ID_IFE2: IFE2 Client 0. + * @HW_FENCE_CLIENT_ID_IFE3: IFE3 Client 0. + * @HW_FENCE_CLIENT_ID_IFE4: IFE4 Client 0. 
+ * @HW_FENCE_CLIENT_ID_IFE5: IFE5 Client 0. + * @HW_FENCE_CLIENT_ID_IFE6: IFE6 Client 0. + * @HW_FENCE_CLIENT_ID_IFE7: IFE7 Client 0. + * @HW_FENCE_CLIENT_MAX: Max number of clients, any client must be added + * before this enum. + */ +enum hw_fence_client_id { + HW_FENCE_CLIENT_ID_CTX0 = 0x1, + HW_FENCE_CLIENT_ID_CTL0, + HW_FENCE_CLIENT_ID_CTL1, + HW_FENCE_CLIENT_ID_CTL2, + HW_FENCE_CLIENT_ID_CTL3, + HW_FENCE_CLIENT_ID_CTL4, + HW_FENCE_CLIENT_ID_CTL5, + HW_FENCE_CLIENT_ID_VAL0, + HW_FENCE_CLIENT_ID_VAL1, + HW_FENCE_CLIENT_ID_VAL2, + HW_FENCE_CLIENT_ID_VAL3, + HW_FENCE_CLIENT_ID_VAL4, + HW_FENCE_CLIENT_ID_VAL5, + HW_FENCE_CLIENT_ID_VAL6, + HW_FENCE_CLIENT_ID_IPE, + HW_FENCE_CLIENT_ID_VPU = HW_FENCE_CLIENT_ID_IPE + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE0 = HW_FENCE_CLIENT_ID_VPU + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE1 = HW_FENCE_CLIENT_ID_IFE0 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE2 = HW_FENCE_CLIENT_ID_IFE1 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE3 = HW_FENCE_CLIENT_ID_IFE2 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE4 = HW_FENCE_CLIENT_ID_IFE3 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE5 = HW_FENCE_CLIENT_ID_IFE4 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE6 = HW_FENCE_CLIENT_ID_IFE5 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE7 = HW_FENCE_CLIENT_ID_IFE6 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_MAX = HW_FENCE_CLIENT_ID_IFE7 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT +}; + +#if IS_ENABLED(CONFIG_QTI_HW_FENCE) +/** + * msm_hw_fence_register() - Registers a client with the HW Fence Driver. + * @client_id: ID of the client that is being registered. + * @mem_descriptor: Pointer to fill the memory descriptor. Fence + * controller driver fills this pointer with the + * memory descriptor for the rx/tx queues. 
+ * + * This call initializes any shared memory region for the tables/queues + * required for the HW Fence Driver to communicate with Fence Controller + * for this client_id and fills the memory descriptor for the queues + * that the client hw cores need to manage. + * + * Return: Handle to the client object that must be used for further calls + * to the fence controller driver or NULL in case of error. + * + * The returned handle is used internally by the fence controller driver + * in further calls to identify the client and access any resources + * allocated for this client. + */ +void *msm_hw_fence_register( + enum hw_fence_client_id client_id, + struct msm_hw_fence_mem_addr *mem_descriptor); + +/** + * msm_hw_fence_deregister() - Deregisters a client that was previously + * registered with the HW Fence Driver. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_deregister(void *client_handle); + +/** + * msm_hw_fence_create() - Creates a new hw fence. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @params: Hw fence creation parameters containing dma fence + * to create its associated hw-fence. + * + * This call creates the hw fence and registers it with the fence + * controller. After the creation of this fence, it is a Client Driver + * responsibility to 'destroy' this fence to prevent any leakage of + * hw-fence resources. + * To destroy a fence, 'msm_hw_fence_destroy' must be called, once the + * fence is not required anymore, which is when all the references to + * the dma-fence are released. 
+ * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_create(void *client_handle, + struct msm_hw_fence_create_params *params); + +/** + * msm_hw_fence_destroy() - Destroys a hw fence. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @fence: Sw dma-fence to destroy its associated hw-fence. + * + * The fence destroyed by this function, is a fence that must have been + * created by the hw fence driver through 'msm_hw_fence_create' call. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_destroy(void *client_handle, + struct dma_fence *fence); + +/** + * msm_hw_fence_destroy_with_handle() - Destroys a hw fence through its handle. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @handle: handle for hw-fence to destroy + * + * The fence destroyed by this function, is a fence that must have been + * created by the hw fence driver through 'msm_hw_fence_create' call. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_destroy_with_handle(void *client_handle, u64 handle); + +/** + * msm_hw_fence_wait_update_v2() - Register or unregister the Client with the + * Fence Controller as a waiting-client of the + * list of fences received as parameter. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @fences: Pointer to an array of pointers containing the fences to + * 'wait-on' for this client. If a 'fence-array' fence is passed, + * driver will iterate through the individual 'fences' which are + * part of the 'fence-array' and will register to wait-for-all the + * individual fences of the fence-array. 
+ * A 'fence-array' passed as parameter can only have 'individual' + * fences and cannot have another nested 'fence-array', + * otherwise this API will return failure. + * Also, all the 'fences' in this list must have a corresponding + * hw-fence that was registered by the producer of the fence, + * otherwise, this API will return failure. + * @handles: Optional pointer to an array of handles of 'fences'. + * If non-null, these handles are filled by the function. + * This list must have the same size as 'fences' if present. + * @client_data_list: Optional pointer to an array of u64 client_data + * values for each fence in 'fences'. + * If non-null, this list must have the same size as + * the 'fences' list. This client registers each fence + * with the client_data value at the same index so that + * this value is returned to the client upon signaling + * of the fence. + * If a null pointer is provided, a default value of + * zero is registered as the client_data of each fence. + * @num_fences: Number of elements in the 'fences' list (and 'handles' and + * 'client_data_list' if either or both are present). + * @reg: Boolean to indicate if register or unregister for waiting on + * the hw-fence. + * + * If the 'register' boolean is set as true, this API will register with + * the Fence Controller the Client as a consumer (i.e. 'wait-client') of + * the fences received as parameter. + * Function will return immediately after the client was registered + * (i.e this function does not wait for the fences to be signaled). + * When any of the Fences received as parameter is signaled (or all the + * fences in case of a fence-array), Fence controller will trigger the hw + * signal to notify the Client hw-core about the signaled fence (or fences + * in case of a fence array). i.e. 
signalization of the hw fence it is a + * hw to hw communication between Fence Controller and the Client hw-core, + * and this API is only the interface to allow the Client Driver to + * register its Client hw-core for the hw-to-hw notification. + * If the 'register' boolean is set as false, this API will unregister + * with the Fence Controller the Client as a consumer, this is used for + * cases where a Timeout waiting for a fence occurs and client drivers want + * to unregister for signal. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_wait_update_v2(void *client_handle, + struct dma_fence **fences, u64 *handles, u64 *client_data_list, u32 num_fences, bool reg); + +/** + * msm_hw_fence_wait_update() - Register or unregister the Client with the + * Fence Controller as a waiting-client of the + * list of fences received as parameter. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @fences: Pointer to an array of pointers containing the fences to + * 'wait-on' for this client. If a 'fence-array' fence is passed, + * driver will iterate through the individual 'fences' which are + * part of the 'fence-array' and will register to wait-for-all the + * individual fences of the fence-array. + * A 'fence-array' passed as parameter can only have 'individual' + * fences and cannot have another nested 'fence-array', + * otherwise this API will return failure. + * Also, all the 'fences' in this list must have a corresponding + * hw-fence that was registered by the producer of the fence, + * otherwise, this API will return failure. + * @num_fences: Number of elements in the 'fences' list. + * @reg: Boolean to indicate if register or unregister for waiting on + * the hw-fence. + * + * If the 'register' boolean is set as true, this API will register with + * the Fence Controller the Client as a consumer (i.e. 
'wait-client') of + * the fences received as parameter. + * Function will return immediately after the client was registered + * (i.e this function does not wait for the fences to be signaled). + * When any of the Fences received as parameter is signaled (or all the + * fences in case of a fence-array), Fence controller will trigger the hw + * signal to notify the Client hw-core about the signaled fence (or fences + * in case of a fence array). i.e. signalization of the hw fence it is a + * hw to hw communication between Fence Controller and the Client hw-core, + * and this API is only the interface to allow the Client Driver to + * register its Client hw-core for the hw-to-hw notification. + * If the 'register' boolean is set as false, this API will unregister + * with the Fence Controller the Client as a consumer, this is used for + * cases where a Timeout waiting for a fence occurs and client drivers want + * to unregister for signal. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_wait_update(void *client_handle, + struct dma_fence **fences, u32 num_fences, bool reg); + +/** + * msm_hw_fence_reset_client() - Resets the HW Fence Client. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @reset_flags: Flags to choose the reset type. See MSM_HW_FENCE_RESET_* + * definitions. + * + * This function iterates through the HW Fences and removes the client + * from the waiting-client mask in any of the HW Fences and signal the + * fences owned by that client. + * This function should only be called by clients upon error, when clients + * did a HW reset, to make sure any HW Fence where the client was register + * for wait are removed, and any Fence owned by the client are signaled. 
+ * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags); + +/** + * msm_hw_fence_reset_client_by_id() - Resets the HW Fence Client through + * its id. + * @client_id: id of client to reset + * @reset_flags: Flags to choose the reset type. See MSM_HW_FENCE_RESET_* + * definitions. + * + * This function iterates through the HW Fences and removes the client + * from the waiting-client mask in any of the HW Fences and signal the + * fences owned by that client. + * This function should only be called by clients upon error, when clients + * did a HW reset, to make sure any HW Fence where the client was register + * for wait are removed, and any Fence owned by the client are signaled. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_reset_client_by_id(enum hw_fence_client_id client_id, u32 reset_flags); + +/** + * msm_hw_fence_update_txq() - Updates Client Tx Queue with the Fence info. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @handle: handle for fence to update in the Tx Queue. + * @flags: flags to set in the queue for the fence. + * @error: error to set in the queue for the fence. + * + * This function should only be used by clients that cannot have the Tx Queue + * updated by the Firmware or the HW Core. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 error); + +/** + * msm_hw_fence_update_txq_error() - Updates error field for fence already in Tx Queue. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @handle: handle for existing fence in Tx Queue to update. + * @error: error to set in the queue for the fence. + * @update_flags: flags to choose the update type. 
See MSM_HW_FENCE_UPDATE_ERROR_* + * definitions. + * + * This function should only be used by clients that cannot have the Tx Queue + * updated by the Firmware or the HW Core. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_update_txq_error(void *client_handle, u64 handle, u32 error, u32 update_flags); + +/** + * msm_hw_fence_trigger_signal() - Triggers signal for the tx/rx signal pair + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @ tx_client_id: id of the client triggering the signal. + * @ rx_client_id: id of the client receiving the signal. + * @ signal_id: id of the signal to trigger + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_trigger_signal(void *client_handle, u32 tx_client_id, u32 rx_client_id, + u32 signal_id); + +/** + * msm_hw_fence_register_error_cb() - Register callback to be dispatched when + * HW Fence Client is waiting for a fence + * that is signaled with error. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. + * @cb: pointer to callback function to be invoked + * @data: opaque pointer passed back with callback + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_register_error_cb(void *client_handle, msm_hw_fence_error_cb_t cb, void *data); + +/** + * msm_hw_fence_deregister_error_cb() - Deregister callback to be dispatched when + * HW Fence Client is waiting for a fence + * that is signaled with error. + * @client_handle: Hw fence driver client handle, this handle was returned + * during the call 'msm_hw_fence_register' to register the + * client. 
+ * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_deregister_error_cb(void *client_handle); + +#else +static inline void *msm_hw_fence_register(enum hw_fence_client_id client_id, + struct msm_hw_fence_mem_addr *mem_descriptor) +{ + return NULL; +} + +static inline int msm_hw_fence_deregister(void *client_handle) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_create(void *client_handle, + struct msm_hw_fence_create_params *params) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_destroy(void *client_handle, struct dma_fence *fence) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_destroy_with_handle(void *client_handle, u64 handle) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_wait_update_v2(void *client_handle, + struct dma_fence **fences, u64 *handles, u64 *client_data_list, u32 num_fences, bool reg) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_wait_update(void *client_handle, + struct dma_fence **fences, u32 num_fences, bool reg) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_reset_client_by_id(enum hw_fence_client_id client_id, + u32 reset_flags) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 error) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_update_txq_error(void *client_handle, u64 handle, u32 error, + u32 update_flags) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_trigger_signal(void *client_handle, u32 tx_client_id, + u32 rx_client_id, u32 signal_id) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_register_error_cb(void *client_handle, msm_hw_fence_error_cb_t cb, + void *data) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_deregister_error_cb(void *client_handle) +{ + return -EINVAL; +} +#endif + +#if IS_ENABLED(CONFIG_DEBUG_FS) && 
IS_ENABLED(CONFIG_QTI_HW_FENCE) +/** + * msm_hw_fence_dump_debug_data() - Dumps debug data information + * @client_handle: Hw fence driver client handle returned during 'msm_hw_fence_register'. + * @dump_flags: Flags to indicate which info to dump, see MSM_HW_FENCE_DBG_DUMP_** flags. + * @dump_clients_mask: Optional bitmask to indicate along with the caller of the api, which other + * clients to dump data from. E.g. a client like display might want to dump + * info of any all other clients from which it can receive fences, like gfx. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_dump_debug_data(void *client_handle, u32 dump_flags, u32 dump_clients_mask); + +/** + * msm_hw_fence_dump_debug_data() - Dumps hw-fence information for dma-fence + * @client_handle: Hw fence driver client handle returned during 'msm_hw_fence_register'. + * @fence: dma_fence to dump hw-fence information + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int msm_hw_fence_dump_fence(void *client_handle, struct dma_fence *fence); + +#else +static inline int msm_hw_fence_dump_debug_data(void *client_handle, u32 dump_flags, + u32 dump_clients_mask) +{ + return -EINVAL; +} + +static inline int msm_hw_fence_dump_fence(void *client_handle, struct dma_fence *fence) +{ + return -EINVAL; +} +#endif + +#endif From 68c7ec857a2254bddf99d852d569332206212df4 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Thu, 17 Aug 2023 12:08:29 -0700 Subject: [PATCH 088/166] mm-drivers: hw_fence: fix hw fence driver header path Modify hw-fence driver header path to compile from display si. 
Change-Id: I58e5aa3cdce430be0cc5488b8a0cd6c2d68a9fc5 Signed-off-by: Ingrid Gallardo --- hw_fence/include/hw_fence_drv_priv.h | 2 +- hw_fence/src/msm_hw_fence_synx_translation.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index fb4caacf43..f3d088065c 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -10,9 +10,9 @@ #include #include #include -#include #include #include +#include "msm_hw_fence.h" /* max u64 to indicate invalid fence */ #define HW_FENCE_INVALID_PARENT_FENCE (~0ULL) diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c index 50b7e382b4..4b45f65a08 100644 --- a/hw_fence/src/msm_hw_fence_synx_translation.c +++ b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -5,7 +5,7 @@ #include #include -#include +#include "msm_hw_fence.h" #include "msm_hw_fence_synx_translation.h" #include "hw_fence_drv_priv.h" #include "hw_fence_drv_debug.h" From 8b01ecabb67499767cabd040168cfac96d481bb2 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 20 Jun 2023 13:25:46 -0700 Subject: [PATCH 089/166] mm-drivers: hw_fence: fix bugs with hlos fence signaling The HW Fence Driver signals fences during client reset and when a hw-fence client registers on an already signaled fence. Add parent fence signaling to update the pending child count and signal join fences' waiting clients if all child fences are signaled. Add synchronization for already signaled fences, to prevent a race condition where the fence is signaled by both HW Fence Driver and Fence Controller. Add change to propagate error for already signaled fences. 
Change-Id: If9073a58590d9daf40e4bd47be01f19fe16ec203 Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_priv.c | 114 +++++++++++++++++++++++++++---- 1 file changed, 99 insertions(+), 15 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 61e3aedef8..eb0402ddec 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1403,6 +1403,32 @@ destroy_fence: false); } +/* update join fence for signaled child_fence and return if the join fence should be signaled */ +bool _update_and_get_join_fence_signal_status(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence *join_fence, u32 child_fence_error) +{ + bool signal_join_fence, error = false; + + /* child fence is already signaled */ + GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 1); /* lock */ + join_fence->error |= child_fence_error; + if (join_fence->pending_child_cnt) + join_fence->pending_child_cnt--; + else + error = true; + signal_join_fence = !join_fence->pending_child_cnt; + GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 0); /* unlock */ + + /* update memory for the table update */ + wmb(); + + if (error) + HWFNC_ERR("join fence ctx:%llu seq:%llu pending_child_cnt==0 before decrement\n", + join_fence->ctx_id, join_fence->seq_id); + + return signal_join_fence; +} + int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, struct dma_fence_array *array, u64 *hash_join_fence, u64 client_data) @@ -1473,15 +1499,8 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, if (hw_fence_child->flags & MSM_HW_FENCE_FLAG_SIGNAL) { /* child fence is already signaled */ - GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 1); /* lock */ - join_fence->error |= hw_fence_child->error; - if (--join_fence->pending_child_cnt == 0) - signal_join_fence = true; - - /* update memory for the table update */ - wmb(); - - GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 0); 
/* unlock */ + signal_join_fence = _update_and_get_join_fence_signal_status(drv_data, + join_fence, hw_fence_child->error); } else { /* child fence is not signaled */ @@ -1557,6 +1576,7 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, { struct msm_hw_fence *hw_fence; enum hw_fence_client_data_id data_id; + bool is_signaled; if (client_data) { data_id = hw_fence_get_client_data_id(hw_fence_client->client_id_ext); @@ -1577,6 +1597,7 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */ /* register client in the hw fence */ + is_signaled = hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL; hw_fence->wait_client_mask |= BIT(hw_fence_client->client_id); hw_fence->fence_wait_time = hw_fence_get_qtime(drv_data); hw_fence->debug_refcount++; @@ -1589,10 +1610,11 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */ /* if hw fence already signaled, signal the client */ - if (hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL) { + if (is_signaled) { if (fence != NULL) set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &fence->flags); - _fence_ctl_signal(drv_data, hw_fence_client, hw_fence, *hash, 0, client_data, 0); + _fence_ctl_signal(drv_data, hw_fence_client, hw_fence, *hash, 0, client_data, + hw_fence->error); } return 0; @@ -1623,7 +1645,7 @@ int hw_fence_process_fence(struct hw_fence_driver_data *drv_data, } static void _signal_all_wait_clients(struct hw_fence_driver_data *drv_data, - struct msm_hw_fence *hw_fence, u64 hash, int error) + struct msm_hw_fence *hw_fence, u64 wait_client_mask, u64 hash, int error) { enum hw_fence_client_id wait_client_id; enum hw_fence_client_data_id data_id; @@ -1632,7 +1654,7 @@ static void _signal_all_wait_clients(struct hw_fence_driver_data *drv_data, /* signal with an error all the waiting clients for this fence */ for (wait_client_id = 0; wait_client_id <= 
drv_data->rxq_clients_num; wait_client_id++) { - if (hw_fence->wait_client_mask & BIT(wait_client_id)) { + if (wait_client_mask & BIT(wait_client_id)) { hw_fence_wait_client = drv_data->clients[wait_client_id]; if (!hw_fence_wait_client) @@ -1649,6 +1671,69 @@ static void _signal_all_wait_clients(struct hw_fence_driver_data *drv_data, } } +/* + * This function must be called with a signaled hw-fence; hw_fence->parents_cnt and + * hw_fence->parent_list fields are only modified for unsignaled fences + */ +static void _signal_parent_fences(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence *hw_fence, u32 parents_cnt, u64 hash, int error) +{ + struct msm_hw_fence *join_fence; + u64 parent_hash; + int i; + + if (parents_cnt > MSM_HW_FENCE_MAX_JOIN_PARENTS) { + HWFNC_ERR("hw_fence hash:%llu has invalid parents_cnt:%u max:%u\n", hash, + parents_cnt, MSM_HW_FENCE_MAX_JOIN_PARENTS); + parents_cnt = MSM_HW_FENCE_MAX_JOIN_PARENTS; + } + + for (i = 0; i < parents_cnt; i++) { + parent_hash = hw_fence->parent_list[i]; + join_fence = _get_hw_fence(drv_data->hw_fence_table_entries, + drv_data->hw_fences_tbl, parent_hash); + if (!join_fence) { + HWFNC_ERR("bad parent hash:%llu of child hash:%llu\n", parent_hash, hash); + continue; + } + + if (_update_and_get_join_fence_signal_status(drv_data, join_fence, error)) + /* no need to lock access to wait client mask for join fences */ + _signal_all_wait_clients(drv_data, join_fence, join_fence->wait_client_mask, + parent_hash, join_fence->error); + } +} + +/* + * Check fence signaling status. If unsignaled, + * 1. signal waiting clients, + * 2. 
signal parent fences (and waiting clients on parent fences) + */ +static void _signal_fence_if_unsignaled(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence *hw_fence, u64 hash, int error) +{ + u64 wait_client_mask; + u32 parents_cnt; + + /* check flags and error for signaling */ + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */ + if (hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL) { + /* fence is already signaled so do nothing */ + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); + return; + } + hw_fence->flags |= MSM_HW_FENCE_FLAG_SIGNAL; + hw_fence->error = error; + wait_client_mask = hw_fence->wait_client_mask; + parents_cnt = hw_fence->parents_cnt; + hw_fence->parents_cnt = 0; + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */ + + /* fields used by the following are not modified for signaled fences */ + _signal_parent_fences(drv_data, hw_fence, parents_cnt, hash, error); + _signal_all_wait_clients(drv_data, hw_fence, wait_client_mask, hash, error); +} + void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client) { @@ -1719,8 +1804,7 @@ int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, if (hw_fence->fence_allocator == hw_fence_client->client_id) { /* if fence is not signaled, signal with error all the waiting clients */ - if (!(hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL)) - _signal_all_wait_clients(drv_data, hw_fence, hash, error); + _signal_fence_if_unsignaled(drv_data, hw_fence, hash, error); if (reset_flags & MSM_HW_FENCE_RESET_WITHOUT_DESTROY) goto skip_destroy; From 155bbbea82d945b7cc26c97bb9a0228224bcce14 Mon Sep 17 00:00:00 2001 From: Grace An Date: Fri, 2 Jun 2023 14:32:22 -0700 Subject: [PATCH 090/166] mm-drivers: hw_fence: add hw-fence refcount for create and signal This change adds refcounting to the hw-fence for creation and signaling. 
When a hw-fence is created, it has two refcounts, which are released by the hw-fence client that created the fence and the fence controller when it signals the fence. The hw-fence is destroyed when the hw-fence refcount reaches zero. Change-Id: I812bf8ce7a35df22390d0e77f10c0797152d57a3 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 22 ++- .../include/msm_hw_fence_synx_translation.h | 1 + hw_fence/src/hw_fence_drv_debug.c | 6 +- hw_fence/src/hw_fence_drv_priv.c | 145 ++++++++++++------ hw_fence/src/msm_hw_fence_synx_translation.c | 1 + 5 files changed, 126 insertions(+), 49 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index f3d088065c..2c5629c3e0 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -78,6 +78,22 @@ */ #define HW_FENCE_EVENT_MAX_DATA 12 +/** + * HW_FENCE_FCTL_REFCOUNT: + * Refcount held by Fence Controller for signaling. + * This bit in hw_fence->refcount is set during creation of a hw-fence and released when the + * hw-fence is signaled by Fence Controller. + */ +#define HW_FENCE_FCTL_REFCOUNT BIT(31) + +/** + * HW_FENCE_HLOS_REFCOUNT_MASK: + * Mask for refcounts acquired and released from HLOS. + * The field "hw_fence->refcount & HW_FENCE_HLOS_REFCOUNT_MASK" stores the number of refcounts held + * by HW Fence clients or HW Fence Driver. + */ +#define HW_FENCE_HLOS_REFCOUNT_MASK GENMASK(30, 0) + enum hw_fence_lookup_ops { HW_FENCE_LOOKUP_OP_CREATE = 0x1, HW_FENCE_LOOKUP_OP_DESTROY, @@ -463,7 +479,8 @@ struct msm_hw_fence_event { * @fence_create_time: debug info with the create time timestamp * @fence_trigger_time: debug info with the trigger time timestamp * @fence_wait_time: debug info with the register-for-wait timestamp - * @debug_refcount: refcount used for debugging + * @refcount: refcount on the hw-fence. 
This is split into multiple fields, see + * HW_FENCE_HLOS_REFCOUNT_MASK and HW_FENCE_FCTL_REFCOUNT for more detail * @client_data: array of data optionally passed from and returned to clients waiting on the fence * during fence signaling */ @@ -483,7 +500,7 @@ struct msm_hw_fence { u64 fence_create_time; u64 fence_trigger_time; u64 fence_wait_time; - u64 debug_refcount; + u64 refcount; u64 client_data[HW_FENCE_MAX_CLIENTS_WITH_DATA]; }; @@ -506,6 +523,7 @@ int hw_fence_destroy(struct hw_fence_driver_data *drv_data, u64 context, u64 seqno); int hw_fence_destroy_with_hash(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 hash); +int hw_fence_destroy_refcount(struct hw_fence_driver_data *drv_data, u64 hash, u32 ref); int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, struct dma_fence_array *array, u64 *hash_join_fence, u64 client_data); diff --git a/hw_fence/include/msm_hw_fence_synx_translation.h b/hw_fence/include/msm_hw_fence_synx_translation.h index b1724b588b..5d557bab2f 100644 --- a/hw_fence/include/msm_hw_fence_synx_translation.h +++ b/hw_fence/include/msm_hw_fence_synx_translation.h @@ -9,6 +9,7 @@ #include extern bool hw_fence_driver_enable; +extern struct hw_fence_driver_data *hw_fence_drv_data; #ifndef SYNX_HW_FENCE_CLIENT_START #define SYNX_HW_FENCE_CLIENT_START 1024 diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index 31e96318d6..83b0c16bb4 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -16,7 +16,7 @@ #define HFENCE_TBL_MSG \ "[%d]hfence[%u] v:%d err:%u ctx:%llu seq:%llu wait:0x%llx alloc:%d f:0x%llx child_cnt:%d"\ - "%s ct:%llu tt:%llu wt:%llu\n" + "%s ct:%llu tt:%llu wt:%llu ref:0x%llx\n" /* each hwfence parent includes one "32-bit" element + "," separator */ #define HW_FENCE_MAX_PARENTS_SUBLIST_DUMP (MSM_HW_FENCE_MAX_JOIN_PARENTS * 9) @@ -517,7 +517,7 @@ static void 
_dump_fence_helper(enum hw_fence_drv_prio prio, struct msm_hw_fence count, index, hw_fence->valid, hw_fence->error, hw_fence->ctx_id, hw_fence->seq_id, hw_fence->wait_client_mask, hw_fence->fence_allocator, hw_fence->flags, hw_fence->pending_child_cnt, parents_dump, hw_fence->fence_create_time, - hw_fence->fence_trigger_time, hw_fence->fence_wait_time); + hw_fence->fence_trigger_time, hw_fence->fence_wait_time, hw_fence->refcount); } void hw_fence_debug_dump_fence(enum hw_fence_drv_prio prio, struct msm_hw_fence *hw_fence, u64 hash, @@ -540,7 +540,7 @@ static inline int _dump_fence(struct msm_hw_fence *hw_fence, char *buf, int len, cnt, index, hw_fence->valid, hw_fence->error, hw_fence->ctx_id, hw_fence->seq_id, hw_fence->wait_client_mask, hw_fence->fence_allocator, hw_fence->flags, hw_fence->pending_child_cnt, parents_dump, hw_fence->fence_create_time, - hw_fence->fence_trigger_time, hw_fence->fence_wait_time); + hw_fence->fence_trigger_time, hw_fence->fence_wait_time, hw_fence->refcount); return ret; } diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index eb0402ddec..e4fc79dff0 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -951,7 +951,7 @@ static void _cleanup_hw_fence(struct msm_hw_fence *hw_fence) hw_fence->fence_create_time = 0; hw_fence->fence_trigger_time = 0; hw_fence->fence_wait_time = 0; - hw_fence->debug_refcount = 0; + hw_fence->refcount = 0; hw_fence->parents_cnt = 0; hw_fence->pending_child_cnt = 0; @@ -962,7 +962,7 @@ static void _cleanup_hw_fence(struct msm_hw_fence *hw_fence) } /* This function must be called with the hw fence lock */ -static void _reserve_hw_fence(struct hw_fence_driver_data *drv_data, +static int _reserve_hw_fence(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hw_fence, u32 client_id, u64 context, u64 seqno, u32 hash, u32 pending_child_cnt) { @@ -976,28 +976,77 @@ static void _reserve_hw_fence(struct hw_fence_driver_data *drv_data, 
hw_fence->flags = 0; /* fence just reserved, there shouldn't be any flags set */ hw_fence->fence_allocator = client_id; hw_fence->fence_create_time = hw_fence_get_qtime(drv_data); - hw_fence->debug_refcount++; + /* one released by creating client; one released by FCTL */ + hw_fence->refcount = HW_FENCE_FCTL_REFCOUNT + 1; HWFNC_DBG_LUT("Reserved fence client:%d ctx:%llu seq:%llu hash:%u\n", client_id, context, seqno, hash); + + return 0; } /* This function must be called with the hw fence lock */ -static void _unreserve_hw_fence(struct hw_fence_driver_data *drv_data, +static int _unreserve_hw_fence(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hw_fence, u32 client_id, u64 context, u64 seqno, u32 hash, u32 pending_child_cnt) { - _cleanup_hw_fence(hw_fence); + if (hw_fence->refcount & HW_FENCE_HLOS_REFCOUNT_MASK) + hw_fence->refcount--; + else + return -EINVAL; /* keep hw-fence in table for debugging purposes */ - /* unreserve this HW fence */ - hw_fence->valid = 0; + /* if both hlos and fctl refcounts are cleared, then delete the fence */ + if (!hw_fence->refcount) { + _cleanup_hw_fence(hw_fence); - HWFNC_DBG_LUT("Unreserved fence client:%d ctx:%llu seq:%llu hash:%u\n", - client_id, context, seqno, hash); + /* unreserve this HW fence */ + hw_fence->valid = 0; + } + + HWFNC_DBG_LUT("Unreserved fence client:%d ctx:%llu seq:%llu hash:%u refcount:%llx\n", + client_id, context, seqno, hash, hw_fence->refcount); + + return 0; +} + +int hw_fence_destroy_refcount(struct hw_fence_driver_data *drv_data, u64 hash, u32 ref) +{ + struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl; + struct msm_hw_fence *hw_fence = NULL; + int ret = 0; + + hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, hw_fences_tbl, hash); + if (!hw_fence) { + HWFNC_ERR("bad hw fence hash:%llu\n", hash); + return -EINVAL; + } + + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */ + if (hw_fence->refcount & ref) { + hw_fence->refcount &= ~ref; + } else { + 
GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */ + HWFNC_ERR("fence ctx:%llu seq:%llu hash:%llu ref:0x%llx before destroy ref:0x%x\n", + hw_fence->ctx_id, hw_fence->seq_id, hash, hw_fence->refcount, ref); + /* keep hw-fence in table for debugging purposes */ + return -EINVAL; + } + if (!hw_fence->refcount) { + _cleanup_hw_fence(hw_fence); + + /* unreserve this HW fence */ + hw_fence->valid = 0; + } + + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */ + HWFNC_DBG_H("Removed 0x%x refcount on fence hash:%llu ref:0x%llx\n", ref, hash, + hw_fence->refcount); + + return ret; } /* This function must be called with the hw fence lock */ -static void _reserve_join_fence(struct hw_fence_driver_data *drv_data, +static int _reserve_join_fence(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hw_fence, u32 client_id, u64 context, u64 seqno, u32 hash, u32 pending_child_cnt) { @@ -1010,16 +1059,18 @@ static void _reserve_join_fence(struct hw_fence_driver_data *drv_data, hw_fence->seq_id = seqno; hw_fence->fence_allocator = client_id; hw_fence->fence_create_time = hw_fence_get_qtime(drv_data); - hw_fence->debug_refcount++; + hw_fence->refcount = HW_FENCE_FCTL_REFCOUNT; /* refcount released by FCTL */ hw_fence->pending_child_cnt = pending_child_cnt; HWFNC_DBG_LUT("Reserved join fence client:%d ctx:%llu seq:%llu hash:%u\n", client_id, context, seqno, hash); + + return 0; } /* This function must be called with the hw fence lock */ -static void _fence_found(struct hw_fence_driver_data *drv_data, +static int _fence_found(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hw_fence, u32 client_id, u64 context, u64 seqno, u32 hash, u32 pending_child_cnt) { @@ -1030,6 +1081,8 @@ static void _fence_found(struct hw_fence_driver_data *drv_data, */ HWFNC_DBG_LUT("Found fence client:%d ctx:%llu seq:%llu hash:%u\n", client_id, context, seqno, hash); + + return 0; } char *_get_op_mode(enum hw_fence_lookup_ops op_code) @@ -1055,7 +1108,7 @@ struct 
msm_hw_fence *_hw_fence_lookup_and_process(struct hw_fence_driver_data *d u32 pending_child_cnt, enum hw_fence_lookup_ops op_code, u64 *hash) { bool (*compare_fnc)(struct msm_hw_fence *hfence, u64 context, u64 seqno); - void (*process_fnc)(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hfence, + int (*process_fnc)(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hfence, u32 client_id, u64 context, u64 seqno, u32 hash, u32 pending); struct msm_hw_fence *hw_fence = NULL; u64 step = 0; @@ -1122,8 +1175,8 @@ struct msm_hw_fence *_hw_fence_lookup_and_process(struct hw_fence_driver_data *d /* Process the hw fence found by the algorithm */ if (process_fnc) { - process_fnc(drv_data, hw_fence, client_id, context, seqno, *hash, - pending_child_cnt); + ret = process_fnc(drv_data, hw_fence, client_id, context, seqno, + *hash, pending_child_cnt); /* update memory table with processing */ wmb(); @@ -1138,9 +1191,9 @@ struct msm_hw_fence *_hw_fence_lookup_and_process(struct hw_fence_driver_data *d op_code == HW_FENCE_LOOKUP_OP_CREATE_JOIN) && seqno == hw_fence->seq_id && context == hw_fence->ctx_id) { /* ctx & seqno must be unique creating a hw-fence */ + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); HWFNC_ERR("cannot create hw fence with same ctx:%llu seqno:%llu\n", context, seqno); - GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); break; } /* compare can fail if we have a collision, we will linearly resolve it */ @@ -1150,6 +1203,10 @@ struct msm_hw_fence *_hw_fence_lookup_and_process(struct hw_fence_driver_data *d GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); + if (hw_fence_found && ret) + HWFNC_ERR("failed process_func client:%u op:%s ctx:%llu seq:%llu h:%llu\n", + client_id, _get_op_mode(op_code), context, seqno, *hash); + /* Increment step for the next loop */ step++; } @@ -1186,17 +1243,6 @@ int hw_fence_create(struct hw_fence_driver_data *drv_data, return ret; } -static inline int _hw_fence_cleanup(struct hw_fence_driver_data 
*drv_data, - struct msm_hw_fence *hw_fences_tbl, u32 client_id, u64 context, u64 seqno) { - u64 hash; - - if (!_hw_fence_lookup_and_process(drv_data, hw_fences_tbl, - context, seqno, client_id, 0, HW_FENCE_LOOKUP_OP_DESTROY, &hash)) - return -EINVAL; - - return 0; -} - int hw_fence_destroy(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno) @@ -1204,9 +1250,11 @@ int hw_fence_destroy(struct hw_fence_driver_data *drv_data, u32 client_id = hw_fence_client->client_id; struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl; int ret = 0; + u64 hash; - /* remove hw fence from table*/ - if (_hw_fence_cleanup(drv_data, hw_fences_tbl, client_id, context, seqno)) { + /* decrement refcount on hw-fence */ + if (!_hw_fence_lookup_and_process(drv_data, hw_fences_tbl, + context, seqno, client_id, 0, HW_FENCE_LOOKUP_OP_DESTROY, &hash)) { HWFNC_ERR("Fail destroying fence client:%u ctx:%llu seqno:%llu\n", client_id, context, seqno); ret = -EINVAL; @@ -1218,7 +1266,7 @@ int hw_fence_destroy(struct hw_fence_driver_data *drv_data, int hw_fence_destroy_with_hash(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 hash) { - u32 client_id = hw_fence_client->client_id; + u32 client_id = hw_fence_client ? 
hw_fence_client->client_id : ~0; struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl; struct msm_hw_fence *hw_fence = NULL; int ret = 0; @@ -1235,13 +1283,17 @@ int hw_fence_destroy_with_hash(struct hw_fence_driver_data *drv_data, return -EINVAL; } - /* remove hw fence from table*/ - if (_hw_fence_cleanup(drv_data, hw_fences_tbl, client_id, hw_fence->ctx_id, - hw_fence->seq_id)) { - HWFNC_ERR("Fail destroying fence client:%u ctx:%llu seqno:%llu hash:%llu\n", - client_id, hw_fence->ctx_id, hw_fence->seq_id, hash); + /* decrement refcount on hw-fence */ + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */ + if (hw_fence->valid) + _unreserve_hw_fence(drv_data, hw_fence, client_id, hw_fence->ctx_id, + hw_fence->seq_id, hash, 0); + else ret = -EINVAL; - } + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */ + + if (ret) + HWFNC_ERR("unreserve of invalid fence hash:%llu client:%u\n", hash, client_id); return ret; } @@ -1279,11 +1331,9 @@ static struct msm_hw_fence *_hw_fence_process_join_fence(struct hw_fence_driver_ if (!join_fence) HWFNC_ERR("Fail to create join fence client:%u ctx:%llu seqno:%llu\n", client_id, context, seqno); - } else { - /* destroy the fence */ - if (_hw_fence_cleanup(drv_data, hw_fences_tbl, client_id, context, seqno)) - HWFNC_ERR("Fail destroying join fence client:%u ctx:%llu seqno:%llu\n", - client_id, context, seqno); + } else if (hw_fence_destroy_refcount(drv_data, *hash, HW_FENCE_FCTL_REFCOUNT)) { + HWFNC_ERR("Fail destroy join fence client:%u ctx:%llu seq:%llu hash:%llu\n", + client_id, context, seqno, *hash); } return join_fence; @@ -1295,7 +1345,7 @@ struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data, { struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl; struct msm_hw_fence *hw_fence; - u32 client_id = hw_fence_client ? hw_fence_client->client_id : 0xff; + u32 client_id = hw_fence_client ? 
hw_fence_client->client_id : ~0; /* find the hw fence */ hw_fence = _hw_fence_lookup_and_process(drv_data, hw_fences_tbl, context, @@ -1600,7 +1650,6 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, is_signaled = hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL; hw_fence->wait_client_mask |= BIT(hw_fence_client->client_id); hw_fence->fence_wait_time = hw_fence_get_qtime(drv_data); - hw_fence->debug_refcount++; if (client_data) hw_fence->client_data[data_id] = client_data; @@ -1697,10 +1746,14 @@ static void _signal_parent_fences(struct hw_fence_driver_data *drv_data, continue; } - if (_update_and_get_join_fence_signal_status(drv_data, join_fence, error)) + if (_update_and_get_join_fence_signal_status(drv_data, join_fence, error)) { /* no need to lock access to wait client mask for join fences */ _signal_all_wait_clients(drv_data, join_fence, join_fence->wait_client_mask, parent_hash, join_fence->error); + + /* decrement refcount for signal on behalf of fence controller */ + hw_fence_destroy_refcount(drv_data, parent_hash, HW_FENCE_FCTL_REFCOUNT); + } } } @@ -1708,6 +1761,7 @@ static void _signal_parent_fences(struct hw_fence_driver_data *drv_data, * Check fence signaling status. If unsignaled, * 1. signal waiting clients, * 2. signal parent fences (and waiting clients on parent fences) + * 3. 
decrement refcount for signal on behalf of fence controller */ static void _signal_fence_if_unsignaled(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hw_fence, u64 hash, int error) @@ -1732,6 +1786,9 @@ static void _signal_fence_if_unsignaled(struct hw_fence_driver_data *drv_data, /* fields used by the following are not modified for signaled fences */ _signal_parent_fences(drv_data, hw_fence, parents_cnt, hash, error); _signal_all_wait_clients(drv_data, hw_fence, wait_client_mask, hash, error); + + /* remove ref held by fence controller to signal hw-fence */ + hw_fence_destroy_refcount(drv_data, hash, HW_FENCE_FCTL_REFCOUNT); } void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data, diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c index 6bffd4c423..f875826874 100644 --- a/hw_fence/src/msm_hw_fence_synx_translation.c +++ b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -189,6 +189,7 @@ int synx_hwfence_create(struct synx_session *session, struct synx_create_params if (handle > U32_MAX) { HWFNC_ERR("synx_id:%d fence handle:%llu would overflow h_synx\n", session->type, handle); + hw_fence_destroy_refcount(hw_fence_drv_data, handle, HW_FENCE_FCTL_REFCOUNT); msm_hw_fence_destroy_with_handle(session->client, handle); return -SYNX_INVALID; } From 1e05058867cf9203377b43a4ba9e177562330f43 Mon Sep 17 00:00:00 2001 From: Grace An Date: Fri, 9 Jun 2023 16:07:07 -0700 Subject: [PATCH 091/166] mm-drivers: hw_fence: add dma-fence signal callback This change adds a dma-fence signal callback for dma-fences backed by hw-fences. To ensure that the hw-fence is not destroyed until the dma-fence is signaled, a refcount on the hw-fence is stored for the dma-fence signal callback. When the dma-fence is signaled, the hw-fence refcount is decremented by one. If the hw-fence has not been signaled, signal the hw-fence from the HW Fence Driver. 
Change-Id: I0d511f9c9c64f12ea2affe63fb221de2a267331e Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 14 +++++ hw_fence/src/hw_fence_drv_priv.c | 92 ++++++++++++++++++++++++++-- hw_fence/src/msm_hw_fence.c | 14 ++++- 3 files changed, 115 insertions(+), 5 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 2c5629c3e0..c802ef76e5 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -281,6 +281,19 @@ struct hw_fence_client_queue_desc { u32 start_offset; }; +/** + * struct hw_fence_signal_cb - Structure holding hw-fence callback data for dma-fence callback + * + * @fence_cb: fence callback data structure used to add dma_fence_callback + * @drv_data: structure holding internal hw-fence driver data + * @hash: hash of hw-fence to decrement refcount in dma-fence callback + */ +struct hw_fence_signal_cb { + struct dma_fence_cb fence_cb; + struct hw_fence_driver_data *drv_data; + u64 hash; +}; + /** * struct hw_fence_driver_data - Structure holding internal hw-fence driver data * @@ -518,6 +531,7 @@ void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data, int hw_fence_create(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno, u64 *hash); +int hw_fence_add_callback(struct hw_fence_driver_data *drv_data, struct dma_fence *fence, u64 hash); int hw_fence_destroy(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno); diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index e4fc79dff0..de36cb8abc 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1761,10 +1761,10 @@ static void _signal_parent_fences(struct hw_fence_driver_data *drv_data, * Check fence signaling status. If unsignaled, * 1. signal waiting clients, * 2. 
signal parent fences (and waiting clients on parent fences) - * 3. decrement refcount for signal on behalf of fence controller + * 3. decrement refcount for signal on behalf of fence controller (if release_ref is true) */ static void _signal_fence_if_unsignaled(struct hw_fence_driver_data *drv_data, - struct msm_hw_fence *hw_fence, u64 hash, int error) + struct msm_hw_fence *hw_fence, u64 hash, int error, bool release_ref) { u64 wait_client_mask; u32 parents_cnt; @@ -1788,7 +1788,8 @@ static void _signal_fence_if_unsignaled(struct hw_fence_driver_data *drv_data, _signal_all_wait_clients(drv_data, hw_fence, wait_client_mask, hash, error); /* remove ref held by fence controller to signal hw-fence */ - hw_fence_destroy_refcount(drv_data, hash, HW_FENCE_FCTL_REFCOUNT); + if (release_ref) + hw_fence_destroy_refcount(drv_data, hash, HW_FENCE_FCTL_REFCOUNT); } void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data, @@ -1861,7 +1862,7 @@ int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, if (hw_fence->fence_allocator == hw_fence_client->client_id) { /* if fence is not signaled, signal with error all the waiting clients */ - _signal_fence_if_unsignaled(drv_data, hw_fence, hash, error); + _signal_fence_if_unsignaled(drv_data, hw_fence, hash, error, true); if (reset_flags & MSM_HW_FENCE_RESET_WITHOUT_DESTROY) goto skip_destroy; @@ -1905,3 +1906,86 @@ enum hw_fence_client_data_id hw_fence_get_client_data_id(enum hw_fence_client_id return data_id; } + +static void msm_hw_fence_signal_callback(struct dma_fence *fence, struct dma_fence_cb *cb) +{ + struct hw_fence_driver_data *drv_data; + struct hw_fence_signal_cb *signal_cb; + struct msm_hw_fence *hw_fence; + u64 hash; + + if (!fence || !cb) { + HWFNC_ERR("Invalid params fence:0x%pK cb:0x%pK\n", fence, cb); + return; + } + + signal_cb = (struct hw_fence_signal_cb *)cb; + drv_data = signal_cb->drv_data; + hash = signal_cb->hash; + if (!drv_data) { + HWFNC_ERR("invalid signal_cb params\n"); + 
return; + } + HWFNC_DBG_IRQ("dma-fence signal callback ctx:%llu seqno:%llu flags:%lx err:%d\n", + fence->context, fence->seqno, fence->flags, fence->error); + + hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, drv_data->hw_fences_tbl, hash); + if (!hw_fence) { + HWFNC_ERR("bad hw fence hash:%llu\n", hash); + goto error; + } + + if (hw_fence->ctx_id != fence->context || hw_fence->seq_id != fence->seqno) { + HWFNC_ERR("invalid hfence hash:%llu ctx:%llu seq:%llu expected ctx:%llu seq:%llu\n", + hash, hw_fence->ctx_id, hw_fence->seq_id, fence->context, fence->seqno); + goto error; + } + + /* if unsignaled, signal but do not release ref held by FCTL */ + _signal_fence_if_unsignaled(drv_data, hw_fence, hash, fence->error, false); + hw_fence_destroy_with_hash(drv_data, NULL, hash); /* release ref held by dma-fence signal */ +error: + kfree(signal_cb); +} + +int hw_fence_add_callback(struct hw_fence_driver_data *drv_data, struct dma_fence *fence, u64 hash) +{ + struct hw_fence_signal_cb *signal_cb; + struct msm_hw_fence *hw_fence; + int ret; + + hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, drv_data->hw_fences_tbl, hash); + if (!hw_fence) { + HWFNC_ERR("Failed to find hw-fence for hash:%llu\n", hash); + return -EINVAL; + } + + signal_cb = kzalloc(sizeof(*signal_cb), GFP_KERNEL); + if (!signal_cb) + return -ENOMEM; + + signal_cb->drv_data = drv_data; + signal_cb->hash = hash; + + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); + hw_fence->refcount++; + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); + + ret = dma_fence_add_callback(fence, &signal_cb->fence_cb, msm_hw_fence_signal_callback); + if (ret) { + if (dma_fence_is_signaled(fence)) { + HWFNC_DBG_IRQ("dma_fence is signaled ctx:%llu seq:%llu flags:%lx err:%d\n", + fence->context, fence->seqno, fence->flags, fence->error); + msm_hw_fence_signal_callback(fence, &signal_cb->fence_cb); + ret = 0; + } else { + HWFNC_ERR("failed to add signal_cb ctx:%llu seq:%llu f:%lx err:%d ret:%d\n", + 
fence->context, fence->seqno, fence->flags, fence->error, ret); + /* release ref held by dma-fence signal */ + hw_fence_destroy_with_hash(drv_data, NULL, hash); + kfree(signal_cb); + } + } + + return ret; +} diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index be96d3f842..c64a2ca01e 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -210,12 +210,24 @@ int msm_hw_fence_create(void *client_handle, return ret; } + ret = hw_fence_add_callback(hw_fence_drv_data, fence, *params->handle); + if (ret) { + HWFNC_ERR("Fail to add dma-fence signal cb client:%d ctx:%llu seq:%llu ret:%d\n", + hw_fence_client->client_id, fence->context, fence->seqno, ret); + /* release both refs, one held by fctl and one held by creating client */ + hw_fence_destroy_refcount(hw_fence_drv_data, *params->handle, + HW_FENCE_FCTL_REFCOUNT); + hw_fence_destroy_with_hash(hw_fence_drv_data, hw_fence_client, *params->handle); + + return ret; + } + /* If no error, set the HW Fence Flag in the dma-fence */ set_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags); HWFNC_DBG_H("-\n"); - return 0; + return ret; } EXPORT_SYMBOL_GPL(msm_hw_fence_create); From c71f0f6e8a71795598307ddb6e312bd8afd97615 Mon Sep 17 00:00:00 2001 From: Grace An Date: Mon, 24 Jul 2023 17:38:52 -0700 Subject: [PATCH 092/166] mm-drivers: hw_fence: add hw-fence refcount for import This change adds hw-fence refcounting for synx_import. Clients which call synx_import must call synx_release after passing the hw-fence handle to firmware. 
Change-Id: I86101959ce2674697220565b3762e877d9d26a16 Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_debug.c | 1 + hw_fence/src/hw_fence_drv_priv.c | 56 ++++++++++++++++++++++--------- hw_fence/src/hw_fence_ioctl.c | 2 +- hw_fence/src/msm_hw_fence.c | 46 ++++++++++++++++++++++--- 4 files changed, 83 insertions(+), 22 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index 83b0c16bb4..22ff613e1a 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -581,6 +581,7 @@ static int dump_single_entry(struct hw_fence_driver_data *drv_data, char *buf, u } len = _dump_fence(hw_fence, buf, len, max_size, hash, 0); + hw_fence_destroy_with_hash(drv_data, NULL, hash); /* release ref from msm_hw_fence_find */ exit: /* move idx to end of table to stop the dump */ diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index de36cb8abc..0624a058f1 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1059,7 +1059,8 @@ static int _reserve_join_fence(struct hw_fence_driver_data *drv_data, hw_fence->seq_id = seqno; hw_fence->fence_allocator = client_id; hw_fence->fence_create_time = hw_fence_get_qtime(drv_data); - hw_fence->refcount = HW_FENCE_FCTL_REFCOUNT; /* refcount released by FCTL */ + /* one released by importing client; one released by FCTL */ + hw_fence->refcount = HW_FENCE_FCTL_REFCOUNT + 1; hw_fence->pending_child_cnt = pending_child_cnt; @@ -1074,13 +1075,18 @@ static int _fence_found(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hw_fence, u32 client_id, u64 context, u64 seqno, u32 hash, u32 pending_child_cnt) { + if ((hw_fence->refcount & HW_FENCE_HLOS_REFCOUNT_MASK) == HW_FENCE_HLOS_REFCOUNT_MASK) + return -EINVAL; + /* - * Do nothing, when this find fence fn is invoked, all processing is done outside. 
- * Currently just keeping this function for debugging purposes, can be removed - * in final versions + * Increment the hw-fence refcount. All other processing is done outside. After processing + * is done, the refcount needs to be decremented either explicitly by the client or as part + * of processing in HW Fence Driver. */ - HWFNC_DBG_LUT("Found fence client:%d ctx:%llu seq:%llu hash:%u\n", - client_id, context, seqno, hash); + + hw_fence->refcount++; + HWFNC_DBG_LUT("Found fence client:%d ctx:%llu seq:%llu hash:%u ref:0x%llx\n", + client_id, context, seqno, hash, hw_fence->refcount); return 0; } @@ -1277,12 +1283,6 @@ int hw_fence_destroy_with_hash(struct hw_fence_driver_data *drv_data, return -EINVAL; } - if (hw_fence->fence_allocator != client_id) { - HWFNC_ERR("client:%u cannot destroy fence hash:%llu fence_allocator:%u\n", - client_id, hash, hw_fence->fence_allocator); - return -EINVAL; - } - /* decrement refcount on hw-fence */ GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */ if (hw_fence->valid) @@ -1444,6 +1444,9 @@ static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data wmb(); } } + /* decrement refcount acquired by finding fence */ + _unreserve_hw_fence(drv_data, hw_fence_child, hw_fence_client->client_id, + hw_fence_child->ctx_id, hw_fence_child->seq_id, hash, 0); GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0); /* unlock */ } @@ -1564,6 +1567,11 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, hw_fence_child->parents_cnt); hw_fence_child->parents_cnt--; + /* decrement refcount acquired by finding fence */ + _unreserve_hw_fence(drv_data, hw_fence_child, + hw_fence_client->client_id, hw_fence_child->ctx_id, + hw_fence_child->seq_id, hash, 0); + /* update memory for the table update */ wmb(); @@ -1575,10 +1583,13 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, hw_fence_child->parent_list[hw_fence_child->parents_cnt - 1] = *hash_join_fence; - - /* update 
memory for the table update */ - wmb(); } + /* decrement refcount acquired by finding fence */ + _unreserve_hw_fence(drv_data, hw_fence_child, hw_fence_client->client_id, + hw_fence_child->ctx_id, hw_fence_child->seq_id, hash, 0); + + /* update memory for the table update */ + wmb(); GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0); /* unlock */ } @@ -1620,6 +1631,14 @@ error_array: return -EINVAL; } +/** + * Registers the hw-fence client for wait on a hw-fence and keeps a reference on that hw-fence. + * The hw-fence must be explicitly dereferenced following this function, e.g. by client + * synx_release call. + * + * Note: This is the only place where the hw-fence refcount is retained for the client to release. + * In all other places, the HW Fence Driver releases the refcount held for processing. + */ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, struct dma_fence *fence, struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno, u64 *hash, u64 client_data) @@ -1637,7 +1656,7 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, } } - /* find the hw fence within the table */ + /* refcount from msm_hw_fence_find must be explicitly released outside this function call */ hw_fence = msm_hw_fence_find(drv_data, hw_fence_client, context, seqno, hash); if (!hw_fence) { HWFNC_ERR("Cannot find fence!\n"); @@ -1854,6 +1873,11 @@ int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, hw_fence->seq_id); hw_fence->wait_client_mask &= ~BIT(hw_fence_client->client_id); + /* remove reference held by waiting client */ + if (!(reset_flags & MSM_HW_FENCE_RESET_WITHOUT_DESTROY)) + _unreserve_hw_fence(drv_data, hw_fence, hw_fence_client->client_id, + hw_fence->ctx_id, hw_fence->seq_id, hash, 0); + /* update memory for the table update */ wmb(); } diff --git a/hw_fence/src/hw_fence_ioctl.c b/hw_fence/src/hw_fence_ioctl.c index ebfcd816e8..7922651852 100644 --- a/hw_fence/src/hw_fence_ioctl.c +++ 
b/hw_fence/src/hw_fence_ioctl.c @@ -442,7 +442,7 @@ static long hw_sync_ioctl_reg_for_wait(struct hw_sync_obj *obj, unsigned long ar return -EINVAL; } - ret = msm_hw_fence_wait_update_v2(obj->client_handle, &fence, NULL, NULL, num_fences, 1); + ret = msm_hw_fence_wait_update(obj->client_handle, &fence, num_fences, 1); /* Decrement the refcount that hw_sync_get_fence increments */ dma_fence_put(fence); diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index c64a2ca01e..a91dc32708 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -314,7 +314,7 @@ int msm_hw_fence_wait_update_v2(void *client_handle, { struct msm_hw_fence_client *hw_fence_client; struct dma_fence_array *array; - int i, ret = 0; + int i, j, destroy_ret, ret = 0; enum hw_fence_client_data_id data_id; if (IS_ERR_OR_NULL(client_handle) || !fence_list || !*fence_list) { @@ -358,7 +358,7 @@ int msm_hw_fence_wait_update_v2(void *client_handle, array, &hash, client_data); if (ret) { HWFNC_ERR("Failed to process FenceArray\n"); - return ret; + goto error; } } else { /* Process individual Fence */ @@ -366,7 +366,7 @@ int msm_hw_fence_wait_update_v2(void *client_handle, &hash, client_data); if (ret) { HWFNC_ERR("Failed to process Fence\n"); - return ret; + goto error; } } @@ -377,14 +377,48 @@ int msm_hw_fence_wait_update_v2(void *client_handle, HWFNC_DBG_H("-\n"); return 0; +error: + for (j = 0; j < i; j++) { + destroy_ret = hw_fence_destroy_with_hash(hw_fence_drv_data, hw_fence_client, + handles[j]); + if (destroy_ret) + HWFNC_ERR("Failed decr fence ref ctx:%llu seq:%llu h:%llu idx:%d ret:%d\n", + fence_list[j] ? fence_list[j]->context : -1, fence_list[j] ? 
+ fence_list[j]->seqno : -1, handles[j], j, destroy_ret); + } + + return ret; } EXPORT_SYMBOL_GPL(msm_hw_fence_wait_update_v2); int msm_hw_fence_wait_update(void *client_handle, struct dma_fence **fence_list, u32 num_fences, bool create) { - return msm_hw_fence_wait_update_v2(client_handle, fence_list, NULL, NULL, num_fences, - create); + u64 handle; + int i, ret = 0; + + for (i = 0; i < num_fences; i++) { + ret = msm_hw_fence_wait_update_v2(client_handle, &fence_list[i], &handle, NULL, + 1, create); + + if (ret) { + HWFNC_ERR("Failed reg for wait on fence ctx:%llu seq:%llu idx:%d ret:%d\n", + fence_list[i] ? fence_list[i]->context : -1, + fence_list[i] ? fence_list[i]->seqno : -1, i, ret); + return ret; + } + + /* decrement reference on hw-fence acquired by msm_hw_fence_wait_update_v2 call */ + ret = msm_hw_fence_destroy_with_handle(client_handle, handle); + if (ret) { + HWFNC_ERR("Failed decr fence ref ctx:%llu seq:%llu h:%llu idx:%d ret:%d\n", + fence_list[i] ? fence_list[i]->context : -1, + fence_list[i] ? fence_list[i]->seqno : -1, handle, i, ret); + return ret; + } + } + + return ret; } EXPORT_SYMBOL_GPL(msm_hw_fence_wait_update); @@ -649,6 +683,8 @@ int msm_hw_fence_dump_fence(void *client_handle, struct dma_fence *fence) return -EINVAL; } hw_fence_debug_dump_fence(HW_FENCE_PRINTK, hw_fence, hash, 0); + /* release refcount acquired by finding fence */ + msm_hw_fence_destroy_with_handle(client_handle, hash); return 0; } From afbd54b0f643fd0d0674c84febe4a4a3ce92f0c3 Mon Sep 17 00:00:00 2001 From: Grace An Date: Fri, 29 Sep 2023 11:22:11 -0700 Subject: [PATCH 093/166] mm-drivers: hw_fence: support deletion of signaled hw-fences Add support to wait on signaled dma-fences that may have had their backing hw-fence deleted. If the hw-fence has been deleted, a new hw-fence is created with signaling status matching the dma-fence. 
Change-Id: I023b5659dac5a93563d99c58d69b05e35838968a Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 8 +- hw_fence/src/hw_fence_drv_priv.c | 156 +++++++++++++++++++++++---- 2 files changed, 141 insertions(+), 23 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index c802ef76e5..386846dd8c 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -57,8 +57,11 @@ /** * msm hw fence flags: * MSM_HW_FENCE_FLAG_SIGNAL - Flag set when the hw-fence is signaled + * MSM_HW_FENCE_FLAG_CREATE_SIGNALED - Flag set when the hw-fence is created to back a signaled + * dma-fence whose hw-fence has been destroyed */ -#define MSM_HW_FENCE_FLAG_SIGNAL BIT(0) +#define MSM_HW_FENCE_FLAG_SIGNAL BIT(0) +#define MSM_HW_FENCE_FLAG_CREATE_SIGNALED BIT(1) /** * MSM_HW_FENCE_MAX_JOIN_PARENTS: @@ -560,6 +563,9 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno, u64 *hash); +struct msm_hw_fence *hw_fence_find_with_dma_fence(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, u64 *hash, + bool *is_signaled, bool create); enum hw_fence_client_data_id hw_fence_get_client_data_id(enum hw_fence_client_id client_id); #endif /* __HW_FENCE_DRV_INTERNAL_H */ diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 0624a058f1..6f37b7c716 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -26,6 +26,9 @@ #define IDX_TRANSLATE_DEFAULT_TO_CUSTOM(queue, idx) \ (((idx) / (queue)->rd_wr_idx_factor) + (queue)->rd_wr_idx_start) +/* number of fences searched for HW Fence import */ +#define HW_FENCE_FIND_THRESHOLD 10 + inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data) { #ifdef 
HWFENCE_USE_SLEEP_TIMER @@ -1109,15 +1112,16 @@ char *_get_op_mode(enum hw_fence_lookup_ops op_code) return "UNKNOWN"; } -struct msm_hw_fence *_hw_fence_lookup_and_process(struct hw_fence_driver_data *drv_data, +struct msm_hw_fence *_hw_fence_lookup_and_process_range(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hw_fences_tbl, u64 context, u64 seqno, u32 client_id, - u32 pending_child_cnt, enum hw_fence_lookup_ops op_code, u64 *hash) + u32 pending_child_cnt, enum hw_fence_lookup_ops op_code, u64 *hash, u64 flags, + u64 start_step, u64 end_step) { bool (*compare_fnc)(struct msm_hw_fence *hfence, u64 context, u64 seqno); int (*process_fnc)(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hfence, u32 client_id, u64 context, u64 seqno, u32 hash, u32 pending); struct msm_hw_fence *hw_fence = NULL; - u64 step = 0; + u64 step = start_step; int ret = 0; bool hw_fence_found = false; @@ -1126,7 +1130,12 @@ struct msm_hw_fence *_hw_fence_lookup_and_process(struct hw_fence_driver_data *d return NULL; } - *hash = ~0; + /* + * When start_step != 0, the hash is already initialized at the correct value and should + * not be reset. 
+ */ + if (!step) + *hash = ~0; HWFNC_DBG_LUT("hw_fence_lookup: %d\n", op_code); @@ -1152,7 +1161,7 @@ struct msm_hw_fence *_hw_fence_lookup_and_process(struct hw_fence_driver_data *d return NULL; } - while (!hw_fence_found && (step < drv_data->hw_fence_table_entries)) { + while (!hw_fence_found && (step < end_step)) { /* Calculate the Hash for the Fence */ ret = _calculate_hash(drv_data->hw_fence_table_entries, context, seqno, step, hash); @@ -1196,10 +1205,19 @@ struct msm_hw_fence *_hw_fence_lookup_and_process(struct hw_fence_driver_data *d if ((op_code == HW_FENCE_LOOKUP_OP_CREATE || op_code == HW_FENCE_LOOKUP_OP_CREATE_JOIN) && seqno == hw_fence->seq_id && context == hw_fence->ctx_id) { - /* ctx & seqno must be unique creating a hw-fence */ + if (flags & MSM_HW_FENCE_FLAG_CREATE_SIGNALED) { + /* hw-fence created for importing client */ + ret = _fence_found(drv_data, hw_fence, client_id, context, + seqno, *hash, pending_child_cnt); + hw_fence_found = true; + } else { + ret = -EALREADY; + } GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); - HWFNC_ERR("cannot create hw fence with same ctx:%llu seqno:%llu\n", - context, seqno); + if (ret == -EALREADY) + HWFNC_ERR("can't create hfence w/ same ctx:%llu seq:%llu\n", + context, seqno); + break; } /* compare can fail if we have a collision, we will linearly resolve it */ @@ -1219,7 +1237,7 @@ struct msm_hw_fence *_hw_fence_lookup_and_process(struct hw_fence_driver_data *d /* If we iterated through the whole list and didn't find the fence, return null */ if (!hw_fence_found) { - HWFNC_ERR("fail to create hw-fence step:%llu\n", step); + HWFNC_DBG_LUT("fail to process hw-fence op_code:%d step:%llu\n", op_code, step); hw_fence = NULL; } @@ -1229,6 +1247,15 @@ struct msm_hw_fence *_hw_fence_lookup_and_process(struct hw_fence_driver_data *d return hw_fence; } +struct msm_hw_fence *_hw_fence_lookup_and_process(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence *hw_fences_tbl, u64 context, u64 seqno, u32 client_id, 
+ u32 pending_child_cnt, enum hw_fence_lookup_ops op_code, u64 *hash, u64 flags) +{ + return _hw_fence_lookup_and_process_range(drv_data, hw_fences_tbl, context, seqno, + client_id, pending_child_cnt, op_code, hash, flags, 0, + drv_data->hw_fence_table_entries); +} + int hw_fence_create(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno, u64 *hash) @@ -1240,7 +1267,7 @@ int hw_fence_create(struct hw_fence_driver_data *drv_data, /* allocate hw fence in table */ if (!_hw_fence_lookup_and_process(drv_data, hw_fences_tbl, - context, seqno, client_id, 0, HW_FENCE_LOOKUP_OP_CREATE, hash)) { + context, seqno, client_id, 0, HW_FENCE_LOOKUP_OP_CREATE, hash, 0)) { HWFNC_ERR("Fail to create fence client:%u ctx:%llu seqno:%llu\n", client_id, context, seqno); ret = -EINVAL; @@ -1260,7 +1287,7 @@ int hw_fence_destroy(struct hw_fence_driver_data *drv_data, /* decrement refcount on hw-fence */ if (!_hw_fence_lookup_and_process(drv_data, hw_fences_tbl, - context, seqno, client_id, 0, HW_FENCE_LOOKUP_OP_DESTROY, &hash)) { + context, seqno, client_id, 0, HW_FENCE_LOOKUP_OP_DESTROY, &hash, 0)) { HWFNC_ERR("Fail destroying fence client:%u ctx:%llu seqno:%llu\n", client_id, context, seqno); ret = -EINVAL; @@ -1327,7 +1354,8 @@ static struct msm_hw_fence *_hw_fence_process_join_fence(struct hw_fence_driver_ if (create) { /* allocate the fence */ join_fence = _hw_fence_lookup_and_process(drv_data, hw_fences_tbl, context, - seqno, client_id, pending_child_cnt, HW_FENCE_LOOKUP_OP_CREATE_JOIN, hash); + seqno, client_id, pending_child_cnt, HW_FENCE_LOOKUP_OP_CREATE_JOIN, hash, + 0); if (!join_fence) HWFNC_ERR("Fail to create join fence client:%u ctx:%llu seqno:%llu\n", client_id, context, seqno); @@ -1349,7 +1377,7 @@ struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data, /* find the hw fence */ hw_fence = _hw_fence_lookup_and_process(drv_data, hw_fences_tbl, context, - seqno, client_id, 0, 
HW_FENCE_LOOKUP_OP_FIND_FENCE, hash); + seqno, client_id, 0, HW_FENCE_LOOKUP_OP_FIND_FENCE, hash, 0); if (!hw_fence) HWFNC_ERR("Fail to find hw fence client:%u ctx:%llu seqno:%llu\n", client_id, context, seqno); @@ -1396,6 +1424,7 @@ static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data { struct dma_fence *child_fence; struct msm_hw_fence *hw_fence_child; + bool child_is_signaled; int idx, j; u64 hash = 0; @@ -1410,9 +1439,11 @@ static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data continue; } - hw_fence_child = msm_hw_fence_find(drv_data, hw_fence_client, child_fence->context, - child_fence->seqno, &hash); - if (!hw_fence_child) { + hw_fence_child = hw_fence_find_with_dma_fence(drv_data, hw_fence_client, + child_fence, &hash, &child_is_signaled, false); + if (child_is_signaled) { + continue; + } else if (!hw_fence_child) { HWFNC_ERR("Cannot cleanup child fence context:%llu seqno:%llu hash:%llu\n", child_fence->context, child_fence->seqno, hash); @@ -1489,7 +1520,7 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *join_fence; struct msm_hw_fence *hw_fence_child; struct dma_fence *child_fence; - bool signal_join_fence = false; + bool child_is_signaled, signal_join_fence = false; u64 hash; int i, ret = 0; enum hw_fence_client_data_id data_id; @@ -1524,6 +1555,12 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, for (i = 0; i < array->num_fences; i++) { child_fence = array->fences[i]; + if (!child_fence) { + HWFNC_ERR("NULL child fence at index:%d for fence array\n", i); + ret = -EINVAL; + goto error_array; + } + /* Nested fence-arrays are not supported */ if (to_dma_fence_array(child_fence)) { HWFNC_ERR("This is a nested fence, fail!\n"); @@ -1539,9 +1576,13 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, } /* Find the HW Fence in the Global Table */ - hw_fence_child = msm_hw_fence_find(drv_data, hw_fence_client, 
child_fence->context, - child_fence->seqno, &hash); - if (!hw_fence_child) { + hw_fence_child = hw_fence_find_with_dma_fence(drv_data, hw_fence_client, + child_fence, &hash, &child_is_signaled, false); + if (child_is_signaled) { + signal_join_fence = _update_and_get_join_fence_signal_status(drv_data, + join_fence, child_fence->error); + continue; + } else if (!hw_fence_child) { HWFNC_ERR("Cannot find child fence context:%llu seqno:%llu hash:%llu\n", child_fence->context, child_fence->seqno, hash); ret = -EINVAL; @@ -1656,8 +1697,12 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, } } - /* refcount from msm_hw_fence_find must be explicitly released outside this function call */ - hw_fence = msm_hw_fence_find(drv_data, hw_fence_client, context, seqno, hash); + /* refcount from finding fence must be explicitly released outside this function call */ + if (fence) + hw_fence = hw_fence_find_with_dma_fence(drv_data, hw_fence_client, fence, hash, + &is_signaled, true); + else + hw_fence = msm_hw_fence_find(drv_data, hw_fence_client, context, seqno, hash); if (!hw_fence) { HWFNC_ERR("Cannot find fence!\n"); return -EINVAL; @@ -1811,6 +1856,73 @@ static void _signal_fence_if_unsignaled(struct hw_fence_driver_data *drv_data, hw_fence_destroy_refcount(drv_data, hash, HW_FENCE_FCTL_REFCOUNT); } +struct msm_hw_fence *_create_signaled_hw_fence(struct hw_fence_driver_data *drv_data, + u32 client_id, struct dma_fence *fence, u64 *hash) +{ + struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl; + struct msm_hw_fence *hw_fence; + + /* create new hw-fence for signaled dma-fence */ + hw_fence = _hw_fence_lookup_and_process(drv_data, hw_fences_tbl, + fence->context, fence->seqno, client_id, 0, HW_FENCE_LOOKUP_OP_CREATE, hash, + MSM_HW_FENCE_FLAG_CREATE_SIGNALED); + if (hw_fence) { + _signal_fence_if_unsignaled(drv_data, hw_fence, *hash, fence->error, true); + HWFNC_DBG_H("created hw-fence to back signaled fence client:%u ctx:%llu seq:%llu\n", + 
client_id, fence->context, fence->seqno); + } else { + HWFNC_ERR("Fail to create signaled hfence client:%u ctx:%llu seq:%llu\n", client_id, + fence->context, fence->seqno); + } + + return hw_fence; +} + +/* finds hw-fence in HW Fence table if present; if not and create==true, create a new hw-fence */ +struct msm_hw_fence *hw_fence_find_with_dma_fence(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, u64 *hash, + bool *is_signaled, bool create) +{ + u32 step, end_step, client_id = hw_fence_client ? hw_fence_client->client_id : 0xff; + struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl; + struct msm_hw_fence *hw_fence = NULL; + + if (!create && dma_fence_is_signaled(fence)) { + /* signaled dma-fence may have been removed from table */ + *is_signaled = true; + return NULL; + } + + for (step = 0; step < drv_data->hw_fence_table_entries; step += HW_FENCE_FIND_THRESHOLD) { + end_step = (step + HW_FENCE_FIND_THRESHOLD > drv_data->hw_fence_table_entries) ? + drv_data->hw_fence_table_entries : step + HW_FENCE_FIND_THRESHOLD; + hw_fence = _hw_fence_lookup_and_process_range(drv_data, hw_fences_tbl, + fence->context, fence->seqno, client_id, 0, HW_FENCE_LOOKUP_OP_FIND_FENCE, + hash, 0, step, end_step); + if (hw_fence) { + /* successfully found backing hw-fence*/ + *is_signaled = false; + return hw_fence; + } + if (dma_fence_is_signaled(fence)) { + /* signaled dma-fence may have been removed from table */ + *is_signaled = true; + return create ? _create_signaled_hw_fence(drv_data, client_id, fence, hash) + : NULL; + } + } + + /* + * The dma-fence signal callback holds a hw-fence refcount until dma-fence signal. If we hit + * this condition (unable to find unsignaled dma-fence with HW Fencing enabled), then the + * hw-fence has been incorrectly released early by someone who did not own the reference. 
+ */ + HWFNC_ERR("Can't find backing hwfence for dma-fence client:%u ctx:%llu seq:%llu f:0x%lx\n", + client_id, fence->context, fence->seqno, fence->flags); + *is_signaled = false; + return NULL; +} + void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client) { From d15a6292e509abcbf76146b5c3d2eff379903770 Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 13 Sep 2023 20:51:06 -0700 Subject: [PATCH 094/166] mm-drivers: hw_fence: allow clients to initialize without mem descriptor Some hw-fence client drivers might not need to receive the memory descriptor right during the initial registration. Allow registration of a hw-fence client without the memory descriptor pointer. Change-Id: I7e2169ca1a9560043259d2c611f1fe1d1720c81a Signed-off-by: Ingrid Gallardo Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_priv.c | 7 +++++-- hw_fence/src/msm_hw_fence.c | 5 ++--- hw_fence/src/msm_hw_fence_synx_translation.c | 5 ++--- 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 6f37b7c716..a0f962044b 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -756,8 +756,11 @@ int hw_fence_alloc_client_resources(struct hw_fence_driver_data *drv_data, } /* Init client memory descriptor */ - memcpy(mem_descriptor, &hw_fence_client->mem_descriptor, - sizeof(struct msm_hw_fence_mem_addr)); + if (!IS_ERR_OR_NULL(mem_descriptor)) + memcpy(mem_descriptor, &hw_fence_client->mem_descriptor, + sizeof(struct msm_hw_fence_mem_addr)); + else + HWFNC_DBG_L("null mem descriptor, skipping copy\n"); exit: return ret; diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index a91dc32708..95176443e8 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -34,9 +34,8 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, return ERR_PTR(-EAGAIN); } - if (!mem_descriptor || 
client_id_ext >= HW_FENCE_CLIENT_MAX) { - HWFNC_ERR("Invalid params: %d client_id_ext:%d\n", - !mem_descriptor, client_id_ext); + if (client_id_ext >= HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("Invalid client_id_ext:%d\n", client_id_ext); return ERR_PTR(-EINVAL); } diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c index f875826874..3ddc4a26e3 100644 --- a/hw_fence/src/msm_hw_fence_synx_translation.c +++ b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -104,9 +104,8 @@ struct synx_session *synx_hwfence_initialize(struct synx_initialization_params * if (!hw_fence_driver_enable) return ERR_PTR(-SYNX_INVALID); - if (IS_ERR_OR_NULL(params) || IS_ERR_OR_NULL(params->ptr)) { - HWFNC_ERR("invalid params:0x%pK params->ptr:0x%pK\n", params, - IS_ERR_OR_NULL(params) ? NULL : params->ptr); + if (IS_ERR_OR_NULL(params)) { + HWFNC_ERR("invalid params:0x%pK\n", params); return ERR_PTR(-SYNX_INVALID); } From 5ed4534e872c0cd2582ab3dbe1e85427cc39e5e2 Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 4 Oct 2023 10:10:24 -0700 Subject: [PATCH 095/166] mm-drivers: hw_fence: add support to create hw-fences with internal dma-fences Currently, client drivers must pass a dma-fence to create a hw-fence. This change removes this requirement and expands the 'msm_hw_fence_create' api to allow clients to request a hw-fence creation without a dma-fence. For such hw-fences, the hw-fence driver internally creates and maintains the internal dma-fence. 
Change-Id: Ibc67e4ac6de4c94ed63cb5c8c9c5084bfa16f7f8 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_debug.h | 94 ----- hw_fence/include/hw_fence_drv_fence.h | 110 ++++++ hw_fence/include/hw_fence_drv_priv.h | 45 ++- hw_fence/src/hw_fence_drv_debug.c | 52 +-- hw_fence/src/hw_fence_drv_priv.c | 392 +++++++++++++++---- hw_fence/src/hw_fence_ioctl.c | 27 +- hw_fence/src/msm_hw_fence.c | 29 +- hw_fence/src/msm_hw_fence_synx_translation.c | 15 +- 8 files changed, 547 insertions(+), 217 deletions(-) create mode 100644 hw_fence/include/hw_fence_drv_fence.h diff --git a/hw_fence/include/hw_fence_drv_debug.h b/hw_fence/include/hw_fence_drv_debug.h index 4f754b7c28..346859e9e2 100644 --- a/hw_fence/include/hw_fence_drv_debug.h +++ b/hw_fence/include/hw_fence_drv_debug.h @@ -8,8 +8,6 @@ #include "hw_fence_drv_ipc.h" -#define HW_FENCE_NAME_SIZE 64 - enum hw_fence_drv_prio { HW_FENCE_HIGH = 0x000001, /* High density debug messages (noisy) */ HW_FENCE_LOW = 0x000002, /* Low density debug messages */ @@ -116,98 +114,6 @@ static const struct hw_fence_out_clients_map {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26}, /* VAL5 */ {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27}, /* VAL6 */ }; - -/** - * struct hw_dma_fence - fences created by hw-fence for debugging. - * @base: base dma-fence structure, this must remain at beginning of the struct. - * @name: name of each fence. - * @client_handle: handle for the client owner of this fence, this is returned by the hw-fence - * driver after a successful registration of the client and used by this fence - * during release. 
- */ -struct hw_dma_fence { - struct dma_fence base; - char name[HW_FENCE_NAME_SIZE]; - void *client_handle; -}; - -static inline struct hw_dma_fence *to_hw_dma_fence(struct dma_fence *fence) -{ - return container_of(fence, struct hw_dma_fence, base); -} - -static inline void _cleanup_fences(int i, struct dma_fence **fences, spinlock_t **fences_lock) -{ - struct hw_dma_fence *dma_fence; - int fence_idx; - - for (fence_idx = i; fence_idx >= 0 ; fence_idx--) { - kfree(fences_lock[fence_idx]); - - dma_fence = to_hw_dma_fence(fences[fence_idx]); - kfree(dma_fence); - } - - kfree(fences_lock); - kfree(fences); -} - -static const char *hw_fence_dbg_get_driver_name(struct dma_fence *fence) -{ - struct hw_dma_fence *hw_dma_fence = to_hw_dma_fence(fence); - - return hw_dma_fence->name; -} - -static const char *hw_fence_dbg_get_timeline_name(struct dma_fence *fence) -{ - struct hw_dma_fence *hw_dma_fence = to_hw_dma_fence(fence); - - return hw_dma_fence->name; -} - -static bool hw_fence_dbg_enable_signaling(struct dma_fence *fence) -{ - return true; -} - -static void _hw_fence_release(struct hw_dma_fence *hw_dma_fence) -{ - if (IS_ERR_OR_NULL(hw_dma_fence->client_handle)) { - HWFNC_ERR("invalid hwfence data, won't release hw_fence!\n"); - return; - } - - /* release hw-fence */ - if (msm_hw_fence_destroy(hw_dma_fence->client_handle, &hw_dma_fence->base)) - HWFNC_ERR("failed to release hw_fence!\n"); -} - -static void hw_fence_dbg_release(struct dma_fence *fence) -{ - struct hw_dma_fence *hw_dma_fence; - - if (!fence) - return; - - HWFNC_DBG_H("release backing fence %pK\n", fence); - hw_dma_fence = to_hw_dma_fence(fence); - - if (test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) - _hw_fence_release(hw_dma_fence); - - kfree(fence->lock); - kfree(hw_dma_fence); -} - -static struct dma_fence_ops hw_fence_dbg_ops = { - .get_driver_name = hw_fence_dbg_get_driver_name, - .get_timeline_name = hw_fence_dbg_get_timeline_name, - .enable_signaling = hw_fence_dbg_enable_signaling, - 
.wait = dma_fence_default_wait, - .release = hw_fence_dbg_release, -}; - #endif /* CONFIG_DEBUG_FS */ #endif /* __HW_FENCE_DRV_DEBUG */ diff --git a/hw_fence/include/hw_fence_drv_fence.h b/hw_fence/include/hw_fence_drv_fence.h new file mode 100644 index 0000000000..1a6689883d --- /dev/null +++ b/hw_fence/include/hw_fence_drv_fence.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __HW_FENCE_DRV_HW_DMA_FENCE +#define __HW_FENCE_DRV_HW_DMA_FENCE + +#define HW_FENCE_NAME_SIZE 64 + +/** + * struct hw_dma_fence - fences internally created by hw-fence driver. + * @base: base dma-fence structure, this must remain at beginning of the struct. + * @name: name of each fence. + * @client_handle: handle for the client owner of this fence, this is returned by the hw-fence + * driver after a successful registration of the client and used by this fence + * during release. + * @data: internal data to process the fence ops. + * @dma_fence_key: key for the dma-fence hash table. 
+ * @is_internal: true if this fence is initialized internally by hw-fence driver, false otherwise + * @signal_cb: drv_data, hash, and signal_cb of hw_fence + * @node: node for fences held in the dma-fences hash table linked lists + */ +struct hw_dma_fence { + struct dma_fence base; + char name[HW_FENCE_NAME_SIZE]; + void *client_handle; + u32 dma_fence_key; + bool is_internal; + struct hw_fence_signal_cb signal_cb; + struct hlist_node node; +}; + +static inline struct hw_dma_fence *to_hw_dma_fence(struct dma_fence *fence) +{ + return container_of(fence, struct hw_dma_fence, base); +} + +static const char *hw_fence_dbg_get_driver_name(struct dma_fence *fence) +{ + struct hw_dma_fence *hw_dma_fence = to_hw_dma_fence(fence); + + return hw_dma_fence->name; +} + +static const char *hw_fence_dbg_get_timeline_name(struct dma_fence *fence) +{ + struct hw_dma_fence *hw_dma_fence = to_hw_dma_fence(fence); + + return hw_dma_fence->name; +} + +static bool hw_fence_dbg_enable_signaling(struct dma_fence *fence) +{ + return true; +} + +static void _hw_fence_release(struct hw_dma_fence *hw_dma_fence) +{ + int ret = 0; + + if (IS_ERR_OR_NULL(hw_dma_fence->client_handle) || (hw_dma_fence->is_internal && + IS_ERR_OR_NULL(hw_dma_fence->signal_cb.drv_data))) { + HWFNC_ERR("invalid hwfence data %pK %pK, won't release hw_fence!\n", + hw_dma_fence->client_handle, hw_dma_fence->signal_cb.drv_data); + return; + } + + /* release hw-fence */ + if (hw_dma_fence->is_internal) /* internally owned hw_dma_fence has its own refcount */ + ret = hw_fence_destroy_refcount(hw_dma_fence->signal_cb.drv_data, + hw_dma_fence->signal_cb.hash, HW_FENCE_DMA_FENCE_REFCOUNT); + else /* externally owned hw_dma_fence uses standard hlos refcount */ + ret = msm_hw_fence_destroy(hw_dma_fence->client_handle, &hw_dma_fence->base); + + if (ret) + HWFNC_ERR("failed to release hw_fence!\n"); +} + +static void hw_fence_dbg_release(struct dma_fence *fence) +{ + struct hw_dma_fence *hw_dma_fence; + + if (!fence) + return; 
+ + HWFNC_DBG_H("release backing fence %pK\n", fence); + hw_dma_fence = to_hw_dma_fence(fence); + + if (test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) + _hw_fence_release(hw_dma_fence); + + kfree(fence->lock); + kfree(hw_dma_fence); +} + +static struct dma_fence_ops hw_fence_dbg_ops = { + .get_driver_name = hw_fence_dbg_get_driver_name, + .get_timeline_name = hw_fence_dbg_get_timeline_name, + .enable_signaling = hw_fence_dbg_enable_signaling, + .wait = dma_fence_default_wait, + .release = hw_fence_dbg_release, +}; + +static inline bool dma_fence_is_hw_dma(struct dma_fence *fence) +{ + return fence->ops == &hw_fence_dbg_ops; +} + +#endif /* __HW_FENCE_DRV_HW_DMA_FENCE */ diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 386846dd8c..65a98a95fd 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -12,6 +12,8 @@ #include #include #include +#include +#include #include "msm_hw_fence.h" /* max u64 to indicate invalid fence */ @@ -59,9 +61,12 @@ * MSM_HW_FENCE_FLAG_SIGNAL - Flag set when the hw-fence is signaled * MSM_HW_FENCE_FLAG_CREATE_SIGNALED - Flag set when the hw-fence is created to back a signaled * dma-fence whose hw-fence has been destroyed + * MSM_HW_FENCE_FLAG_INTERNAL_OWNED - Flag set when HLOS Native fence is internally owned and + * present in dma-fence table */ #define MSM_HW_FENCE_FLAG_SIGNAL BIT(0) #define MSM_HW_FENCE_FLAG_CREATE_SIGNALED BIT(1) +#define MSM_HW_FENCE_FLAG_INTERNAL_OWNED BIT(2) /** * MSM_HW_FENCE_MAX_JOIN_PARENTS: @@ -89,13 +94,30 @@ */ #define HW_FENCE_FCTL_REFCOUNT BIT(31) +/** + * HW_FENCE_DMA_FENCE_REFCOUNT: + * Refcount held by HW Fence Driver for dma-fence release or signal. + * For dma-fences internally owned by the HW Fence Driver, this is set during hw-fence creation and + * cleared during dma_fence_release. 
+ * For external dma-fences initialized by the client, this is set when the hw-fence signal callback + * is added to the dma-fence and cleared during dma_fence_signal. + */ +#define HW_FENCE_DMA_FENCE_REFCOUNT BIT(30) + /** * HW_FENCE_HLOS_REFCOUNT_MASK: * Mask for refcounts acquired and released from HLOS. * The field "hw_fence->refcount & HW_FENCE_HLOS_REFCOUNT_MASK" stores the number of refcounts held * by HW Fence clients or HW Fence Driver. */ -#define HW_FENCE_HLOS_REFCOUNT_MASK GENMASK(30, 0) +#define HW_FENCE_HLOS_REFCOUNT_MASK GENMASK(29, 0) + +/* + * DMA_FENCE_HASH_TABLE_BIT: Bit that define the size of the dma-fences hash table + * DMA_FENCE_HASH_TABLE_SIZE: Size of dma-fences hash table + */ +#define DMA_FENCE_HASH_TABLE_BIT (12) /* size of table = (1 << 12) = 4096 */ +#define DMA_FENCE_HASH_TABLE_SIZE (1 << DMA_FENCE_HASH_TABLE_BIT) enum hw_fence_lookup_ops { HW_FENCE_LOOKUP_OP_CREATE = 0x1, @@ -175,6 +197,8 @@ enum payload_type { * @ipc_client_pid: physical id of the ipc client for this hw fence driver client * @update_rxq: bool to indicate if client uses rx-queue * @send_ipc: bool to indicate if client requires ipc interrupt for already signaled fences + * @context_id: context id for fences created internally + * @seqno: sequence no for fences created internally * @wait_queue: wait queue for the validation clients * @val_signal: doorbell flag to signal the validation clients in the wait queue */ @@ -192,6 +216,8 @@ struct msm_hw_fence_client { int ipc_client_pid; bool update_rxq; bool send_ipc; + u64 context_id; + atomic_t seqno; #if IS_ENABLED(CONFIG_DEBUG_FS) wait_queue_head_t wait_queue; atomic_t val_signal; @@ -347,6 +373,8 @@ struct hw_fence_signal_cb { * @clients: table with the handles of the registered clients; size is equal to clients_num * @vm_ready: flag to indicate if vm has been initialized * @ipcc_dpu_initialized: flag to indicate if dpu hw is initialized + * @dma_fence_table_lock: lock to synchronize access to dma-fence table + * 
@dma_fence_table: table with internal dma-fences for hw-fences */ struct hw_fence_driver_data { @@ -428,6 +456,10 @@ struct hw_fence_driver_data { bool vm_ready; /* state variables */ bool ipcc_dpu_initialized; + + spinlock_t dma_fence_table_lock; + /* table with internal dma-fences created by the this driver on client's behalf */ + DECLARE_HASHTABLE(dma_fence_table, DMA_FENCE_HASH_TABLE_BIT); }; /** @@ -496,7 +528,8 @@ struct msm_hw_fence_event { * @fence_trigger_time: debug info with the trigger time timestamp * @fence_wait_time: debug info with the register-for-wait timestamp * @refcount: refcount on the hw-fence. This is split into multiple fields, see - * HW_FENCE_HLOS_REFCOUNT_MASK and HW_FENCE_FCTL_REFCOUNT for more detail + * HW_FENCE_HLOS_REFCOUNT_MASK and HW_FENCE_FCTL_REFCOUNT and HW_FENCE_DMA_FENCE_REFCOUNT + * for more detail * @client_data: array of data optionally passed from and returned to clients waiting on the fence * during fence signaling */ @@ -567,5 +600,13 @@ struct msm_hw_fence *hw_fence_find_with_dma_fence(struct hw_fence_driver_data *d struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, u64 *hash, bool *is_signaled, bool create); enum hw_fence_client_data_id hw_fence_get_client_data_id(enum hw_fence_client_id client_id); +int hw_fence_signal_fence(struct hw_fence_driver_data *drv_data, struct dma_fence *fence, u64 hash, + u32 error, bool release_ref); + +/* apis for internally managed dma-fence */ +struct dma_fence *hw_dma_fence_init(struct msm_hw_fence_client *hw_fence_client, u64 context, + u64 seqno); +struct dma_fence *hw_fence_internal_dma_fence_create(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 *hash); #endif /* __HW_FENCE_DRV_INTERNAL_H */ diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index 22ff613e1a..26219f8db4 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -11,6 +11,7 @@ #include 
"hw_fence_drv_debug.h" #include "hw_fence_drv_ipc.h" #include "hw_fence_drv_utils.h" +#include "hw_fence_drv_fence.h" #define HW_FENCE_DEBUG_MAX_LOOPS 200 @@ -399,7 +400,7 @@ static ssize_t hw_fence_dbg_tx_and_signal_clients_wr(struct file *file, /******************************************/ /* cleanup hw fence for src client */ - ret = hw_fence_destroy(drv_data, hw_fence_client, context, seqno); + ret = hw_fence_destroy_with_hash(drv_data, hw_fence_client, hash); if (ret) { HWFNC_ERR("Error destroying HW fence\n"); goto exit; @@ -430,8 +431,8 @@ static ssize_t hw_fence_dbg_create_wr(struct file *file, struct msm_hw_fence_create_params params; struct hw_fence_driver_data *drv_data; struct client_data *client_info; - struct hw_dma_fence *dma_fence; - spinlock_t *fence_lock; + struct hw_dma_fence *hw_dma_fence; + struct dma_fence *fence; static u64 hw_fence_dbg_seqno = 1; int client_id, ret; u64 hash; @@ -446,39 +447,25 @@ static ssize_t hw_fence_dbg_create_wr(struct file *file, return -EINVAL; } - /* create debug dma_fence */ - fence_lock = kzalloc(sizeof(*fence_lock), GFP_KERNEL); - if (!fence_lock) - return -ENOMEM; + fence = hw_dma_fence_init(client_info->client_handle, client_info->dma_context, + hw_fence_dbg_seqno); + if (IS_ERR_OR_NULL(fence)) + return -EINVAL; + hw_dma_fence = (struct hw_dma_fence *)fence; - dma_fence = kzalloc(sizeof(*dma_fence), GFP_KERNEL); - if (!dma_fence) { - kfree(fence_lock); - return -ENOMEM; - } - - snprintf(dma_fence->name, HW_FENCE_NAME_SIZE, "hwfence:id:%d:ctx=%llu:seqno:%llu", - client_id, client_info->dma_context, hw_fence_dbg_seqno); - - spin_lock_init(fence_lock); - dma_fence_init(&dma_fence->base, &hw_fence_dbg_ops, fence_lock, - client_info->dma_context, hw_fence_dbg_seqno); - - HWFNC_DBG_H("creating hw_fence for client:%d ctx:%llu seqno:%llu\n", client_id, - client_info->dma_context, hw_fence_dbg_seqno); - params.fence = &dma_fence->base; + params.fence = fence; params.handle = &hash; ret = 
msm_hw_fence_create(client_info->client_handle, ¶ms); if (ret) { HWFNC_ERR("failed to create hw_fence for client:%d ctx:%llu seqno:%llu\n", client_id, client_info->dma_context, hw_fence_dbg_seqno); - dma_fence_put(&dma_fence->base); + dma_fence_put(fence); return -EINVAL; } hw_fence_dbg_seqno++; /* keep handle in dma_fence, to destroy hw-fence during release */ - dma_fence->client_handle = client_info->client_handle; + hw_dma_fence->client_handle = client_info->client_handle; return count; } @@ -992,6 +979,21 @@ static ssize_t hw_fence_dbg_dump_table_wr(struct file *file, } +static inline void _cleanup_fences(int i, struct dma_fence **fences, spinlock_t **fences_lock) +{ + struct hw_dma_fence *dma_fence; + int fence_idx; + + for (fence_idx = i; fence_idx >= 0 ; fence_idx--) { + kfree(fences_lock[fence_idx]); + + dma_fence = to_hw_dma_fence(fences[fence_idx]); + kfree(dma_fence); + } + + kfree(fences_lock); + kfree(fences); +} /** * hw_fence_dbg_create_join_fence() - debugfs write to simulate the lifecycle of a join hw-fence. 
diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index a0f962044b..cb740c828e 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -11,6 +11,7 @@ #include "hw_fence_drv_utils.h" #include "hw_fence_drv_ipc.h" #include "hw_fence_drv_debug.h" +#include "hw_fence_drv_fence.h" /* Global atomic lock */ #define GLOBAL_ATOMIC_STORE(drv_data, lock, val) global_atomic_store(drv_data, lock, val) @@ -655,6 +656,12 @@ static int init_ctrl_queue(struct hw_fence_driver_data *drv_data) return ret; } +static void hw_fence_dma_fence_init_hash_table(struct hw_fence_driver_data *drv_data) +{ + hash_init(drv_data->dma_fence_table); + spin_lock_init(&drv_data->dma_fence_table_lock); +} + int hw_fence_init(struct hw_fence_driver_data *drv_data) { int ret; @@ -722,6 +729,8 @@ int hw_fence_init(struct hw_fence_driver_data *drv_data) goto exit; } + hw_fence_dma_fence_init_hash_table(drv_data); + mem = drv_data->io_mem_base; HWFNC_DBG_H("memory ptr:0x%pK val:0x%x\n", mem, *mem); @@ -1259,6 +1268,219 @@ struct msm_hw_fence *_hw_fence_lookup_and_process(struct hw_fence_driver_data *d drv_data->hw_fence_table_entries); } +struct dma_fence *hw_dma_fence_init(struct msm_hw_fence_client *hw_fence_client, u64 context, + u64 seqno) +{ + struct hw_dma_fence *fence; + spinlock_t *fence_lock; + + /* create dma fence */ + fence_lock = kzalloc(sizeof(*fence_lock), GFP_KERNEL); + if (!fence_lock) + return ERR_PTR(-ENOMEM); + + fence = kzalloc(sizeof(*fence), GFP_KERNEL); + if (!fence) { + kfree(fence_lock); + return ERR_PTR(-ENOMEM); + } + + snprintf(fence->name, HW_FENCE_NAME_SIZE, "hwfence:id:%d:ctx=%llu:seqno:%llu", + hw_fence_client->client_id, context, seqno); + spin_lock_init(fence_lock); + + HWFNC_DBG_L("creating dma_fence for client:%d ctx:%llu seqno:%llu\n", + hw_fence_client->client_id, context, seqno); + + dma_fence_init(&fence->base, &hw_fence_dbg_ops, fence_lock, context, seqno); + fence->client_handle = hw_fence_client; + + 
return (struct dma_fence *)fence; +} + +int hw_fence_dma_fence_table_add(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, u64 hw_fence_hash) +{ + struct hw_dma_fence *hw_dma_fence; + u32 dma_fence_key = hw_fence_hash % DMA_FENCE_HASH_TABLE_SIZE; + unsigned long flags; + + if (!fence || !drv_data || !hw_fence_client) { + HWFNC_ERR("invalid params fence:0x%pK drv_data:0x%pK hw_fence_client:0x%pK\n", + fence, drv_data, hw_fence_client); + return -EINVAL; + } + + hw_dma_fence = to_hw_dma_fence(fence); + HWFNC_DBG_L("add hw_dma_fence:%pK client:%d ctx:%llu seqno:%llu key:%u hash:%llu\n", + hw_dma_fence, hw_fence_client->client_id, fence->context, fence->seqno, + dma_fence_key, hw_fence_hash); + + hw_dma_fence->dma_fence_key = dma_fence_key; + hw_dma_fence->is_internal = true; + hw_dma_fence->signal_cb.hash = hw_fence_hash; + hw_dma_fence->signal_cb.drv_data = drv_data; + + spin_lock_irqsave(&drv_data->dma_fence_table_lock, flags); + hash_add(drv_data->dma_fence_table, &hw_dma_fence->node, dma_fence_key); + spin_unlock_irqrestore(&drv_data->dma_fence_table_lock, flags); + + return 0; +} + +static void msm_hw_fence_internal_signal_callback(struct dma_fence *fence, struct dma_fence_cb *cb) +{ + struct hw_fence_signal_cb *signal_cb; + + if (!fence || !cb) { + HWFNC_ERR("Invalid params fence:0x%pK cb:0x%pK\n", fence, cb); + return; + } + + HWFNC_DBG_IRQ("dma-fence signal callback ctx:%llu seqno:%llu flags:%lx err:%d\n", + fence->context, fence->seqno, fence->flags, fence->error); + + signal_cb = (struct hw_fence_signal_cb *)cb; + + if (hw_fence_signal_fence(signal_cb->drv_data, fence, signal_cb->hash, fence->error, false)) + HWFNC_ERR("failed to signal fence ctx:%llu seq:%llu hash:%llu err:%u\n", + fence->context, fence->seqno, signal_cb->hash, fence->error); +} + + +struct dma_fence *hw_fence_internal_dma_fence_create(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 *hash) 
+{ + struct hw_dma_fence *hw_dma_fence; + struct msm_hw_fence *hw_fence; + struct dma_fence *fence; + u64 context, seqno; + int ret = 0; + + if (!drv_data || !hw_fence_client || !hash) + return ERR_PTR(-EINVAL); + + context = hw_fence_client->context_id; + seqno = atomic_add_return(1, &hw_fence_client->seqno); + fence = hw_dma_fence_init(hw_fence_client, context, seqno); + if (IS_ERR_OR_NULL(fence)) { + HWFNC_ERR("failed to create internal dma-fence client:%d ctx:%llu seq:%llu\n", + hw_fence_client->client_id, context, seqno); + return ERR_PTR(-EINVAL); + } + + ret = hw_fence_create(drv_data, hw_fence_client, context, seqno, hash); + if (ret) { + HWFNC_ERR("failed to back internal dma-fence client:%d ctx:%llu seq:%llu\n", + hw_fence_client->client_id, context, seqno); + ret = -EINVAL; + goto error; + } + + hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, drv_data->hw_fences_tbl, *hash); + if (!hw_fence) { + HWFNC_ERR("bad hw fence hash:%llu client:%u\n", *hash, hw_fence_client->client_id); + ret = -EINVAL; + goto error; + } + + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */ + hw_fence->flags |= MSM_HW_FENCE_FLAG_INTERNAL_OWNED; + hw_fence->refcount |= HW_FENCE_DMA_FENCE_REFCOUNT; + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */ + + /* If no error, set the HW Fence Flag in the dma-fence */ + set_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags); + + ret = hw_fence_dma_fence_table_add(drv_data, hw_fence_client, fence, *hash); + if (ret) { + HWFNC_ERR("failed to add hw-fence ctx:%llu seq:%llu hash:%llu to dma-fence table\n", + context, seqno, *hash); + ret = -EINVAL; + goto error; + } + + hw_dma_fence = to_hw_dma_fence(fence); + /* internal_signal_callback does not take an additional hw-fence refcount */ + ret = dma_fence_add_callback(fence, &hw_dma_fence->signal_cb.fence_cb, + msm_hw_fence_internal_signal_callback); + if (ret) + HWFNC_ERR("Failed to add signal callback ctx:%llu seq:%llu hash:%llu ret:%d\n", + context, seqno, 
*hash, ret); + +error: + if (ret) { + dma_fence_put(fence); + return ERR_PTR(ret); + } + + return fence; +} + +struct dma_fence *hw_fence_dma_fence_find(struct hw_fence_driver_data *drv_data, + u64 hw_fence_hash, bool incr_refcount) +{ + u32 dma_fence_key = hw_fence_hash % DMA_FENCE_HASH_TABLE_SIZE; + struct hw_dma_fence *hw_dma_fence = NULL, *curr; + struct dma_fence *fence = NULL; + unsigned long flags; + + spin_lock_irqsave(&drv_data->dma_fence_table_lock, flags); + hash_for_each_possible(drv_data->dma_fence_table, curr, node, dma_fence_key) { + if (hw_fence_hash == curr->signal_cb.hash) { + hw_dma_fence = curr; + fence = &hw_dma_fence->base; + if (incr_refcount) + dma_fence_get(fence); + break; + } + } + spin_unlock_irqrestore(&drv_data->dma_fence_table_lock, flags); + + HWFNC_DBG_L("hw_dma_fence: %s:%pK ctx:%llu seqno:%llu key:%u dma_fence_ref:%u incr:%s\n", + fence ? "found" : "not found", hw_dma_fence, + fence ? fence->context : 0, fence ? fence->seqno : 0, + dma_fence_key, fence ? kref_read(&fence->refcount) : -1, + incr_refcount ? 
"true" : "false"); + + return fence; +} + +static int hw_fence_dma_fence_table_del(struct hw_fence_driver_data *drv_data, u64 hash) +{ + struct hw_dma_fence *hw_dma_fence; + struct dma_fence *fence; + unsigned long flags; + int ret = 0; + + fence = hw_fence_dma_fence_find(drv_data, hash, false); + if (IS_ERR_OR_NULL(fence)) + return PTR_ERR(fence); + + hw_dma_fence = to_hw_dma_fence(fence); + + HWFNC_DBG_L("removing dma_fence ctx:%llu seqno:%llu key:%u dma_fence_ref:%u\n", + fence->context, fence->seqno, hw_dma_fence->dma_fence_key, + kref_read(&fence->refcount)); + + spin_lock_irqsave(&drv_data->dma_fence_table_lock, flags); + /* remove dma-fence from the internal hash table */ + if (hash_hashed(&hw_dma_fence->node)) + hash_del(&hw_dma_fence->node); + else + ret = -EINVAL; + spin_unlock_irqrestore(&drv_data->dma_fence_table_lock, flags); + + if (ret) + HWFNC_ERR("internally owned dma-fence is not in table ctx:%llu seqno:%llu key:%u\n", + fence->context, fence->seqno, hw_dma_fence->dma_fence_key); + + dma_fence_remove_callback(fence, &hw_dma_fence->signal_cb.fence_cb); + dma_fence_put(fence); + + return ret; +} + int hw_fence_create(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno, u64 *hash) @@ -1299,13 +1521,61 @@ int hw_fence_destroy(struct hw_fence_driver_data *drv_data, return ret; } +/* + * This must be called while holding hw-fence lock; this releases hw-fence lock and (if needed) + * associated dma-fence if necessary + */ +static int hw_fence_put_and_unlock(struct hw_fence_driver_data *drv_data, u32 client_id, + struct msm_hw_fence *hw_fence, u64 hash) +{ + bool release_dma = false; + int ret = 0; + + if (hw_fence->refcount & HW_FENCE_HLOS_REFCOUNT_MASK) { + hw_fence->refcount--; + } else { + ret = -EINVAL; + goto end; /* keep hw-fence in table for debugging purposes */ + } + + if ((hw_fence->flags & MSM_HW_FENCE_FLAG_INTERNAL_OWNED) && + !(hw_fence->refcount & HW_FENCE_HLOS_REFCOUNT_MASK)) { + 
hw_fence->flags &= ~MSM_HW_FENCE_FLAG_INTERNAL_OWNED; + release_dma = true; + } + + if (!hw_fence->refcount) { + _cleanup_hw_fence(hw_fence); + + /* unreserve this HW fence */ + hw_fence->valid = 0; + } + +end: + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */ + + if (ret) { + HWFNC_ERR("fence client:%d ctx:%llu seq:%llu hash:%llu ref:0x%llx before decr\n", + client_id, hw_fence->ctx_id, hw_fence->seq_id, hash, hw_fence->refcount); + return ret; + } + + if (release_dma) { + ret = hw_fence_dma_fence_table_del(drv_data, hash); + if (ret) + HWFNC_ERR("Failed to delete internal dma-fence for hw-fence hash:%llu\n", + hash); + } + + return ret; +} + int hw_fence_destroy_with_hash(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 hash) { u32 client_id = hw_fence_client ? hw_fence_client->client_id : ~0; struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl; struct msm_hw_fence *hw_fence = NULL; - int ret = 0; hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, hw_fences_tbl, hash); if (!hw_fence) { @@ -1313,19 +1583,8 @@ int hw_fence_destroy_with_hash(struct hw_fence_driver_data *drv_data, return -EINVAL; } - /* decrement refcount on hw-fence */ GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */ - if (hw_fence->valid) - _unreserve_hw_fence(drv_data, hw_fence, client_id, hw_fence->ctx_id, - hw_fence->seq_id, hash, 0); - else - ret = -EINVAL; - GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */ - - if (ret) - HWFNC_ERR("unreserve of invalid fence hash:%llu client:%u\n", hash, client_id); - - return ret; + return hw_fence_put_and_unlock(drv_data, client_id, hw_fence, hash); } static struct msm_hw_fence *_hw_fence_process_join_fence(struct hw_fence_driver_data *drv_data, @@ -1479,9 +1738,7 @@ static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data } } /* decrement refcount acquired by finding fence */ - _unreserve_hw_fence(drv_data, hw_fence_child, 
hw_fence_client->client_id, - hw_fence_child->ctx_id, hw_fence_child->seq_id, hash, 0); - GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0); /* unlock */ + hw_fence_put_and_unlock(drv_data, hw_fence_client->client_id, hw_fence_child, hash); } destroy_fence: @@ -1612,15 +1869,9 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, hw_fence_child->parents_cnt--; /* decrement refcount acquired by finding fence */ - _unreserve_hw_fence(drv_data, hw_fence_child, - hw_fence_client->client_id, hw_fence_child->ctx_id, - hw_fence_child->seq_id, hash, 0); + hw_fence_put_and_unlock(drv_data, hw_fence_client->client_id, + hw_fence_child, hash); - /* update memory for the table update */ - wmb(); - - /* unlock */ - GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0); ret = -EINVAL; goto error_array; } @@ -1629,12 +1880,7 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, *hash_join_fence; } /* decrement refcount acquired by finding fence */ - _unreserve_hw_fence(drv_data, hw_fence_child, hw_fence_client->client_id, - hw_fence_child->ctx_id, hw_fence_child->seq_id, hash, 0); - - /* update memory for the table update */ - wmb(); - GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0); /* unlock */ + hw_fence_put_and_unlock(drv_data, hw_fence_client->client_id, hw_fence_child, hash); } if (client_data) @@ -1989,12 +2235,11 @@ int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, hw_fence->wait_client_mask &= ~BIT(hw_fence_client->client_id); /* remove reference held by waiting client */ - if (!(reset_flags & MSM_HW_FENCE_RESET_WITHOUT_DESTROY)) - _unreserve_hw_fence(drv_data, hw_fence, hw_fence_client->client_id, - hw_fence->ctx_id, hw_fence->seq_id, hash, 0); - - /* update memory for the table update */ - wmb(); + if (!(reset_flags & MSM_HW_FENCE_RESET_WITHOUT_DESTROY)) { + hw_fence_put_and_unlock(drv_data, hw_fence_client->client_id, hw_fence, + hash); + return 0; + } } GLOBAL_ATOMIC_STORE(drv_data, 
&hw_fence->lock, 0); /* unlock */ @@ -2006,11 +2251,9 @@ int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, if (reset_flags & MSM_HW_FENCE_RESET_WITHOUT_DESTROY) goto skip_destroy; - ret = hw_fence_destroy(drv_data, hw_fence_client, - hw_fence->ctx_id, hw_fence->seq_id); + ret = hw_fence_destroy_with_hash(drv_data, hw_fence_client, hash); if (ret) { - HWFNC_ERR("Error destroying HW fence: ctx:%llu seqno:%llu\n", - hw_fence->ctx_id, hw_fence->seq_id); + HWFNC_ERR("Error destroying HW fence: hash:%llu\n", hash); } } @@ -2046,44 +2289,58 @@ enum hw_fence_client_data_id hw_fence_get_client_data_id(enum hw_fence_client_id return data_id; } +int hw_fence_signal_fence(struct hw_fence_driver_data *drv_data, struct dma_fence *fence, u64 hash, + u32 error, bool release_ref) +{ + struct msm_hw_fence *hw_fence; + + if (!drv_data || !fence) { + HWFNC_ERR("bad params drv_data:0x%pK fence:0x%pK\n", drv_data, fence); + return -EINVAL; + } + + hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, drv_data->hw_fences_tbl, hash); + if (!hw_fence) { + HWFNC_ERR("bad hw fence hash:%llu\n", hash); + return -EINVAL; + } + + if (hw_fence->ctx_id != fence->context || hw_fence->seq_id != fence->seqno) { + HWFNC_ERR("invalid hfence hash:%llu ctx:%llu seq:%llu expected ctx:%llu seq:%llu\n", + hash, hw_fence->ctx_id, hw_fence->seq_id, fence->context, fence->seqno); + return -EINVAL; + } + + /* if unsignaled, signal but do not release ref held by FCTL */ + _signal_fence_if_unsignaled(drv_data, hw_fence, hash, error, release_ref); + + return 0; +} + static void msm_hw_fence_signal_callback(struct dma_fence *fence, struct dma_fence_cb *cb) { - struct hw_fence_driver_data *drv_data; struct hw_fence_signal_cb *signal_cb; - struct msm_hw_fence *hw_fence; - u64 hash; + int ret = 0; if (!fence || !cb) { HWFNC_ERR("Invalid params fence:0x%pK cb:0x%pK\n", fence, cb); return; } - signal_cb = (struct hw_fence_signal_cb *)cb; - drv_data = signal_cb->drv_data; - hash = 
signal_cb->hash; - if (!drv_data) { - HWFNC_ERR("invalid signal_cb params\n"); - return; - } HWFNC_DBG_IRQ("dma-fence signal callback ctx:%llu seqno:%llu flags:%lx err:%d\n", fence->context, fence->seqno, fence->flags, fence->error); - hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, drv_data->hw_fences_tbl, hash); - if (!hw_fence) { - HWFNC_ERR("bad hw fence hash:%llu\n", hash); - goto error; - } + signal_cb = (struct hw_fence_signal_cb *)cb; + ret = hw_fence_signal_fence(signal_cb->drv_data, fence, signal_cb->hash, fence->error, + false); + if (ret) + HWFNC_ERR("failed to signal fence ctx:%llu seq:%llu hash:%llu err:%u\n", + fence->context, fence->seqno, signal_cb->hash, fence->error); + else + /* release ref held by dma-fence signal */ + hw_fence_destroy_refcount(signal_cb->drv_data, signal_cb->hash, + HW_FENCE_DMA_FENCE_REFCOUNT); - if (hw_fence->ctx_id != fence->context || hw_fence->seq_id != fence->seqno) { - HWFNC_ERR("invalid hfence hash:%llu ctx:%llu seq:%llu expected ctx:%llu seq:%llu\n", - hash, hw_fence->ctx_id, hw_fence->seq_id, fence->context, fence->seqno); - goto error; - } - - /* if unsignaled, signal but do not release ref held by FCTL */ - _signal_fence_if_unsignaled(drv_data, hw_fence, hash, fence->error, false); - hw_fence_destroy_with_hash(drv_data, NULL, hash); /* release ref held by dma-fence signal */ -error: kfree(signal_cb); } @@ -2107,7 +2364,7 @@ int hw_fence_add_callback(struct hw_fence_driver_data *drv_data, struct dma_fenc signal_cb->hash = hash; GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); - hw_fence->refcount++; + hw_fence->refcount |= HW_FENCE_DMA_FENCE_REFCOUNT; GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); ret = dma_fence_add_callback(fence, &signal_cb->fence_cb, msm_hw_fence_signal_callback); @@ -2121,7 +2378,8 @@ int hw_fence_add_callback(struct hw_fence_driver_data *drv_data, struct dma_fenc HWFNC_ERR("failed to add signal_cb ctx:%llu seq:%llu f:%lx err:%d ret:%d\n", fence->context, fence->seqno, 
fence->flags, fence->error, ret); /* release ref held by dma-fence signal */ - hw_fence_destroy_with_hash(drv_data, NULL, hash); + hw_fence_destroy_refcount(signal_cb->drv_data, signal_cb->hash, + HW_FENCE_DMA_FENCE_REFCOUNT); kfree(signal_cb); } } diff --git a/hw_fence/src/hw_fence_ioctl.c b/hw_fence/src/hw_fence_ioctl.c index 7922651852..0fbb69b30e 100644 --- a/hw_fence/src/hw_fence_ioctl.c +++ b/hw_fence/src/hw_fence_ioctl.c @@ -14,6 +14,7 @@ #include "hw_fence_drv_utils.h" #include "hw_fence_drv_ipc.h" #include "hw_fence_drv_debug.h" +#include "hw_fence_drv_fence.h" #define HW_SYNC_IOCTL_COUNT ARRAY_SIZE(hw_sync_debugfs_ioctls) #define HW_FENCE_ARRAY_SIZE 10 @@ -229,7 +230,7 @@ static long hw_sync_ioctl_create_fence(struct hw_sync_obj *obj, unsigned long ar struct msm_hw_fence_create_params params; struct hw_fence_sync_create_data data; struct hw_dma_fence *fence; - spinlock_t *fence_lock; + struct dma_fence *dma_fence; u64 hash; struct sync_file *sync_file; int fd, ret; @@ -244,26 +245,12 @@ static long hw_sync_ioctl_create_fence(struct hw_sync_obj *obj, unsigned long ar if (copy_from_user(&data, (void __user *)arg, sizeof(data))) return -EFAULT; - /* create dma fence */ - fence_lock = kzalloc(sizeof(*fence_lock), GFP_KERNEL); - if (!fence_lock) - return -ENOMEM; + dma_fence = hw_dma_fence_init(obj->client_handle, obj->context, data.seqno); + if (IS_ERR_OR_NULL(dma_fence)) + return -EINVAL; + fence = (struct hw_dma_fence *)dma_fence; - fence = kzalloc(sizeof(*fence), GFP_KERNEL); - if (!fence) { - kfree(fence_lock); - return -ENOMEM; - } - - snprintf(fence->name, HW_FENCE_NAME_SIZE, "hwfence:id:%d:ctx=%llu:seqno:%llu", - obj->client_id, obj->context, data.seqno); - - spin_lock_init(fence_lock); - dma_fence_init(&fence->base, &hw_fence_dbg_ops, fence_lock, obj->context, data.seqno); - - HWFNC_DBG_H("creating hw_fence for client:%d ctx:%llu seqno:%llu\n", obj->client_id, - obj->context, data.seqno); - params.fence = &fence->base; + params.fence = dma_fence; 
params.handle = &hash; /* create hw fence */ diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 95176443e8..aa3563894a 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -13,6 +13,7 @@ #include "hw_fence_drv_utils.h" #include "hw_fence_drv_debug.h" #include "hw_fence_drv_ipc.h" +#include "hw_fence_drv_fence.h" struct hw_fence_driver_data *hw_fence_drv_data; bool hw_fence_driver_enable; @@ -117,6 +118,7 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, if (ret) goto error; + hw_fence_client->context_id = dma_fence_context_alloc(1); mutex_init(&hw_fence_client->error_cb_lock); HWFNC_DBG_INIT("Initialized ptr:0x%p client_id:%d q_num:%d ipc signal:%d vid:%d pid:%d\n", @@ -173,7 +175,7 @@ int msm_hw_fence_create(void *client_handle, struct dma_fence *fence; int ret; - if (IS_ERR_OR_NULL(client_handle) || !params || !params->fence || !params->handle) { + if (IS_ERR_OR_NULL(client_handle) || !params || !params->handle) { HWFNC_ERR("Invalid input\n"); return -EINVAL; } @@ -183,10 +185,23 @@ int msm_hw_fence_create(void *client_handle, return -EAGAIN; } + HWFNC_DBG_H("+\n"); + hw_fence_client = (struct msm_hw_fence_client *)client_handle; fence = (struct dma_fence *)params->fence; - HWFNC_DBG_H("+\n"); + /* if not provided, create a dma-fence */ + if (!fence) { + fence = hw_fence_internal_dma_fence_create(hw_fence_drv_data, hw_fence_client, + params->handle); + if (IS_ERR_OR_NULL(fence)) { + HWFNC_ERR("failed to create internal dma-fence for client:%d err:%ld\n", + hw_fence_client->client_id, PTR_ERR(fence)); + return PTR_ERR(fence); + } + + return 0; + } /* Block any Fence-Array, we should only get individual fences */ array = to_dma_fence_array(fence); @@ -202,8 +217,8 @@ int msm_hw_fence_create(void *client_handle, } /* Create the HW Fence, i.e. 
add entry in the Global Table for this Fence */ - ret = hw_fence_create(hw_fence_drv_data, hw_fence_client, - fence->context, fence->seqno, params->handle); + ret = hw_fence_create(hw_fence_drv_data, hw_fence_client, fence->context, + fence->seqno, params->handle); if (ret) { HWFNC_ERR("Error creating HW fence\n"); return ret; @@ -258,6 +273,12 @@ int msm_hw_fence_destroy(void *client_handle, return -EINVAL; } + if (dma_fence_is_hw_dma(fence)) { + HWFNC_ERR("deprecated api cannot destroy hw_dma_fence ctx:%llu seq:%llu\n", + fence->context, fence->seqno); + return -EINVAL; + } + /* Destroy the HW Fence, i.e. remove entry in the Global Table for the Fence */ ret = hw_fence_destroy(hw_fence_drv_data, hw_fence_client, fence->context, fence->seqno); diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c index 3ddc4a26e3..8cd20caf79 100644 --- a/hw_fence/src/msm_hw_fence_synx_translation.c +++ b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -169,11 +169,16 @@ int synx_hwfence_create(struct synx_session *session, struct synx_create_params } if (IS_ERR_OR_NULL(params->h_synx) || (params->flags > SYNX_CREATE_MAX_FLAGS) || - !(params->flags & SYNX_CREATE_DMA_FENCE) || - (params->flags & SYNX_CREATE_CSL_FENCE) || - IS_ERR_OR_NULL(params->fence)) { - HWFNC_ERR("synx_id:%d invalid create params h_synx:0x%pK flags:0x%x fence:0x%pK\n", - session->type, params->h_synx, params->flags, params->fence); + (params->flags & SYNX_CREATE_CSL_FENCE)) { + HWFNC_ERR("synx_id:%d invalid create params h_synx:0x%pK flags:0x%x\n", + session->type, params->h_synx, params->flags); + return -SYNX_INVALID; + } + + /* if SYNX_CREATE_DMA_FENCE specified and no dma-fence, fail */ + if (!params->fence && (params->flags & SYNX_CREATE_DMA_FENCE)) { + HWFNC_ERR("synx_id:%d invalid fence:%pK params flags:0x%x\n", + session->type, params->fence, params->flags); return -SYNX_INVALID; } From 7698c4e7dc36079f2bd923bfc1e5eaaff87e8593 Mon Sep 17 00:00:00 2001 
From: Grace An Date: Tue, 28 Nov 2023 14:00:22 -0800 Subject: [PATCH 096/166] mm-drivers: hw_fence: add support to initialize hw-fence ops at runtime This change removes function prototypes and stubs from hw-fence driver and adds support to initialize hw-fence operations at runtime. Change-Id: If54354c2bc9798eed7ba315861bcf2b91462ad3f Signed-off-by: Grace An --- hw_fence/include/msm_hw_fence.h | 5 +- .../include/msm_hw_fence_synx_translation.h | 223 ------------------ hw_fence/src/msm_hw_fence_synx_translation.c | 36 ++- 3 files changed, 29 insertions(+), 235 deletions(-) delete mode 100644 hw_fence/include/msm_hw_fence_synx_translation.h diff --git a/hw_fence/include/msm_hw_fence.h b/hw_fence/include/msm_hw_fence.h index 62c0f3ba85..026098c401 100644 --- a/hw_fence/include/msm_hw_fence.h +++ b/hw_fence/include/msm_hw_fence.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2020, The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef __MSM_HW_FENCE_H @@ -10,6 +10,9 @@ #include #include +extern struct hw_fence_driver_data *hw_fence_drv_data; +extern bool hw_fence_driver_enable; + /** * MSM_HW_FENCE_FLAG_ENABLED_BIT - Hw-fence is enabled for the dma_fence. * diff --git a/hw_fence/include/msm_hw_fence_synx_translation.h b/hw_fence/include/msm_hw_fence_synx_translation.h deleted file mode 100644 index 5d557bab2f..0000000000 --- a/hw_fence/include/msm_hw_fence_synx_translation.h +++ /dev/null @@ -1,223 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
- */ - -#ifndef __MSM_HW_FENCE_SYNX_TRANSLATION_H -#define __MSM_HW_FENCE_SYNX_TRANSLATION_H - -#include - -extern bool hw_fence_driver_enable; -extern struct hw_fence_driver_data *hw_fence_drv_data; - -#ifndef SYNX_HW_FENCE_CLIENT_START -#define SYNX_HW_FENCE_CLIENT_START 1024 -#define SYNX_HW_FENCE_CLIENT_END 4096 -#define SYNX_MAX_SIGNAL_PER_CLIENT 64 - -/** - * enum synx_client_id : Unique identifier of the supported clients - * - * @SYNX_CLIENT_HW_FENCE_GFX_CTX0 : HW Fence GFX Client 0 - * @SYNX_CLIENT_HW_FENCE_IPE_CTX0 : HW Fence IPE Client 0 - * @SYNX_CLIENT_HW_FENCE_VID_CTX0 : HW Fence Video Client 0 - * @SYNX_CLIENT_HW_FENCE_DPU0_CTL0 : HW Fence DPU0 Client 0 - * @SYNX_CLIENT_HW_FENCE_DPU1_CTL0 : HW Fence DPU1 Client 0 - * @SYNX_CLIENT_HW_FENCE_IFE0_CTX0 : HW Fence IFE0 Client 0 - * @SYNX_CLIENT_HW_FENCE_IFE1_CTX0 : HW Fence IFE1 Client 0 - * @SYNX_CLIENT_HW_FENCE_IFE2_CTX0 : HW Fence IFE2 Client 0 - * @SYNX_CLIENT_HW_FENCE_IFE3_CTX0 : HW Fence IFE3 Client 0 - * @SYNX_CLIENT_HW_FENCE_IFE4_CTX0 : HW Fence IFE4 Client 0 - * @SYNX_CLIENT_HW_FENCE_IFE5_CTX0 : HW Fence IFE5 Client 0 - * @SYNX_CLIENT_HW_FENCE_IFE6_CTX0 : HW Fence IFE6 Client 0 - * @SYNX_CLIENT_HW_FENCE_IFE7_CTX0 : HW Fence IFE7 Client 0 - * @SYNX_CLIENT_HW_FENCE_IFE8_CTX0 : HW Fence IFE8 Client 0 - * @SYNX_CLIENT_HW_FENCE_IFE9_CTX0 : HW Fence IFE9 Client 0 - * @SYNX_CLIENT_HW_FENCE_IFE10_CTX0 : HW Fence IFE10 Client 0 - * @SYNX_CLIENT_HW_FENCE_IFE11_CTX0 : HW Fence IFE11 Client 0 - * @SYNX_CLIENT_HW_FENCE_IFE12_CTX0 : HW Fence IFE12 Client 0 - * @SYNX_CLIENT_HW_FENCE_IFE13_CTX0 : HW Fence IFE13 Client 0 - * @SYNX_CLIENT_HW_FENCE_IFE14_CTX0 : HW Fence IFE14 Client 0 - * @SYNX_CLIENT_HW_FENCE_IFE15_CTX0 : HW Fence IFE15 Client 0 - */ -enum synx_hwfence_client_id { - SYNX_CLIENT_HW_FENCE_GFX_CTX0 = SYNX_HW_FENCE_CLIENT_START, - SYNX_CLIENT_HW_FENCE_IPE_CTX0 = SYNX_CLIENT_HW_FENCE_GFX_CTX0 + SYNX_MAX_SIGNAL_PER_CLIENT, - SYNX_CLIENT_HW_FENCE_VID_CTX0 = SYNX_CLIENT_HW_FENCE_IPE_CTX0 + 
SYNX_MAX_SIGNAL_PER_CLIENT, - SYNX_CLIENT_HW_FENCE_DPU0_CTL0 = SYNX_CLIENT_HW_FENCE_VID_CTX0 + SYNX_MAX_SIGNAL_PER_CLIENT, - SYNX_CLIENT_HW_FENCE_DPU1_CTL0 = SYNX_CLIENT_HW_FENCE_DPU0_CTL0 + - SYNX_MAX_SIGNAL_PER_CLIENT, - SYNX_CLIENT_HW_FENCE_IFE0_CTX0 = SYNX_CLIENT_HW_FENCE_DPU1_CTL0 + - SYNX_MAX_SIGNAL_PER_CLIENT, - SYNX_CLIENT_HW_FENCE_IFE1_CTX0 = SYNX_CLIENT_HW_FENCE_IFE0_CTX0 + - SYNX_MAX_SIGNAL_PER_CLIENT, - SYNX_CLIENT_HW_FENCE_IFE2_CTX0 = SYNX_CLIENT_HW_FENCE_IFE1_CTX0 + - SYNX_MAX_SIGNAL_PER_CLIENT, - SYNX_CLIENT_HW_FENCE_IFE3_CTX0 = SYNX_CLIENT_HW_FENCE_IFE2_CTX0 + - SYNX_MAX_SIGNAL_PER_CLIENT, - SYNX_CLIENT_HW_FENCE_IFE4_CTX0 = SYNX_CLIENT_HW_FENCE_IFE3_CTX0 + - SYNX_MAX_SIGNAL_PER_CLIENT, - SYNX_CLIENT_HW_FENCE_IFE5_CTX0 = SYNX_CLIENT_HW_FENCE_IFE4_CTX0 + - SYNX_MAX_SIGNAL_PER_CLIENT, - SYNX_CLIENT_HW_FENCE_IFE6_CTX0 = SYNX_CLIENT_HW_FENCE_IFE5_CTX0 + - SYNX_MAX_SIGNAL_PER_CLIENT, - SYNX_CLIENT_HW_FENCE_IFE7_CTX0 = SYNX_CLIENT_HW_FENCE_IFE6_CTX0 + - SYNX_MAX_SIGNAL_PER_CLIENT, - SYNX_CLIENT_HW_FENCE_IFE8_CTX0 = SYNX_CLIENT_HW_FENCE_IFE7_CTX0 + - SYNX_MAX_SIGNAL_PER_CLIENT, - SYNX_CLIENT_HW_FENCE_IFE9_CTX0 = SYNX_CLIENT_HW_FENCE_IFE8_CTX0 + - SYNX_MAX_SIGNAL_PER_CLIENT, - SYNX_CLIENT_HW_FENCE_IFE10_CTX0 = SYNX_CLIENT_HW_FENCE_IFE9_CTX0 + - SYNX_MAX_SIGNAL_PER_CLIENT, - SYNX_CLIENT_HW_FENCE_IFE11_CTX0 = SYNX_CLIENT_HW_FENCE_IFE10_CTX0 + - SYNX_MAX_SIGNAL_PER_CLIENT, - SYNX_CLIENT_HW_FENCE_IFE12_CTX0 = SYNX_CLIENT_HW_FENCE_IFE11_CTX0 + - SYNX_MAX_SIGNAL_PER_CLIENT, - SYNX_CLIENT_HW_FENCE_IFE13_CTX0 = SYNX_CLIENT_HW_FENCE_IFE12_CTX0 + - SYNX_MAX_SIGNAL_PER_CLIENT, - SYNX_CLIENT_HW_FENCE_IFE14_CTX0 = SYNX_CLIENT_HW_FENCE_IFE13_CTX0 + - SYNX_MAX_SIGNAL_PER_CLIENT, - SYNX_CLIENT_HW_FENCE_IFE15_CTX0 = SYNX_CLIENT_HW_FENCE_IFE14_CTX0 + - SYNX_MAX_SIGNAL_PER_CLIENT, - SYNX_CLIENT_HW_FENCE_MAX = SYNX_HW_FENCE_CLIENT_END, -}; -#endif - -#if IS_ENABLED(CONFIG_QTI_HW_FENCE) -/** - * synx_hwfence_initialize - Initializes a new client session - * - * @param params : 
Pointer to session init params - * - * @return Client session pointer on success. NULL or error in case of failure. - */ -struct synx_session *synx_hwfence_initialize(struct synx_initialization_params *params); - -/** - * synx_hwfence_uninitialize - Destroys the client session - * - * @param session : Session ptr (returned from synx_initialize) - * - * @return Status of operation. SYNX_SUCCESS in case of success. - */ -int synx_hwfence_uninitialize(struct synx_session *session); - -/** - * synx_hwfence_create - Creates a synx object - * - * Creates a new synx obj and returns the handle to client. - * - * @param session : Session ptr (returned from synx_initialize) - * @param params : Pointer to create params - * - * @return Status of operation. SYNX_SUCCESS in case of success. - * -SYNX_INVALID will be returned if params were invalid. - * -SYNX_NOMEM will be returned if the kernel can't allocate space for - * synx object. - */ -int synx_hwfence_create(struct synx_session *session, struct synx_create_params *params); - -/** - * synx_hwfence_release - Release the synx object - * - * @param session : Session ptr (returned from synx_initialize) - * @param h_synx : Synx object handle to be destroyed - * - * @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise. - */ -int synx_hwfence_release(struct synx_session *session, u32 h_synx); - -/** - * synx_hwfence_signal - Signals a synx object with the status argument. - * - * This function will signal the synx object referenced by h_synx - * and invoke any external binding synx objs. - * The status parameter will indicate whether the entity - * performing the signaling wants to convey an error case or a success case. - * - * @param session : Session ptr (returned from synx_initialize) - * @param h_synx : Synx object handle - * @param status : Status of signaling. - * Clients can send custom signaling status - * beyond SYNX_STATE_SIGNALED_MAX. - * - * @return Status of operation. 
Negative in case of error. SYNX_SUCCESS otherwise. - */ -int synx_hwfence_signal(struct synx_session *session, u32 h_synx, enum synx_signal_status status); - -/** - * synx_hwfence_recover - Recover any possible handle leaks - * - * Function should be called on HW hang/reset to - * recover the Synx handles shared. This cleans up - * Synx handles held by the rest HW, and avoids - * potential resource leaks. - * - * Function does not destroy the session, but only - * recover synx handles belonging to the session. - * Synx session would still be active and clients - * need to destroy the session explicitly through - * synx_uninitialize API. - * - * @param id : Client ID of core to recover - * - * @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise. - */ -int synx_hwfence_recover(enum synx_client_id id); - -/** - * synx_hwfence_import - Imports (looks up) synx object from given handle/fence - * - * Import subscribes the client session for notification on signal - * of handles/fences. 
- * - * @param session : Session ptr (returned from synx_initialize) - * @param params : Pointer to import params - * - * @return SYNX_SUCCESS upon success, -SYNX_INVAL if synx object is bad state - */ -int synx_hwfence_import(struct synx_session *session, struct synx_import_params *params); - -#else /* CONFIG_QTI_HW_FENCE */ -static inline struct synx_session *synx_hwfence_initialize( - struct synx_initialization_params *params) -{ - return ERR_PTR(-SYNX_INVALID); -} - -static inline int synx_hwfence_uninitialize(struct synx_session *session) -{ - return -SYNX_INVALID; -} - -static inline int synx_hwfence_create(struct synx_session *session, - struct synx_create_params *params) -{ - return -SYNX_INVALID; -} - -static inline int synx_hwfence_release(struct synx_session *session, u32 h_synx) -{ - return -SYNX_INVALID; -} - -static inline int synx_hwfence_signal(struct synx_session *session, u32 h_synx, - enum synx_signal_status status) -{ - return -SYNX_INVALID; -} - -static inline int synx_hwfence_recover(enum synx_client_id id) -{ - return -SYNX_INVALID; -} - -static inline int synx_hwfence_import(struct synx_session *session, - struct synx_import_params *params) -{ - return -SYNX_INVALID; -} - -#endif /* CONFIG_QTI_HW_FENCE */ -#endif /* __MSM_HW_FENCE_SYNX_TRANSLATION_H */ diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c index 8cd20caf79..43e7ea5b72 100644 --- a/hw_fence/src/msm_hw_fence_synx_translation.c +++ b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -1,12 +1,13 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include #include +#include +#include #include "msm_hw_fence.h" -#include "msm_hw_fence_synx_translation.h" #include "hw_fence_drv_priv.h" #include "hw_fence_drv_debug.h" @@ -135,7 +136,7 @@ struct synx_session *synx_hwfence_initialize(struct synx_initialization_params * } EXPORT_SYMBOL_GPL(synx_hwfence_initialize); -int synx_hwfence_uninitialize(struct synx_session *session) +static int synx_hwfence_uninitialize(struct synx_session *session) { int ret; @@ -153,9 +154,8 @@ int synx_hwfence_uninitialize(struct synx_session *session) return to_synx_status(ret); } -EXPORT_SYMBOL_GPL(synx_hwfence_uninitialize); -int synx_hwfence_create(struct synx_session *session, struct synx_create_params *params) +static int synx_hwfence_create(struct synx_session *session, struct synx_create_params *params) { int ret = 0; struct msm_hw_fence_create_params hwfence_params; @@ -201,9 +201,8 @@ int synx_hwfence_create(struct synx_session *session, struct synx_create_params return SYNX_SUCCESS; } -EXPORT_SYMBOL_GPL(synx_hwfence_create); -int synx_hwfence_release(struct synx_session *session, u32 h_synx) +static int synx_hwfence_release(struct synx_session *session, u32 h_synx) { int ret; @@ -220,9 +219,9 @@ int synx_hwfence_release(struct synx_session *session, u32 h_synx) return to_synx_status(ret); } -EXPORT_SYMBOL_GPL(synx_hwfence_release); -int synx_hwfence_signal(struct synx_session *session, u32 h_synx, enum synx_signal_status status) +static int synx_hwfence_signal(struct synx_session *session, u32 h_synx, + enum synx_signal_status status) { int ret; @@ -239,7 +238,6 @@ int synx_hwfence_signal(struct synx_session *session, u32 h_synx, enum synx_sign return to_synx_status(ret); } -EXPORT_SYMBOL_GPL(synx_hwfence_signal); int synx_hwfence_recover(enum synx_client_id id) { @@ -337,4 +335,20 @@ int synx_hwfence_import(struct synx_session *session, struct synx_import_params return ret; } -EXPORT_SYMBOL_GPL(synx_hwfence_import); + +int synx_hwfence_init_ops(struct synx_ops 
*hwfence_ops) +{ + if (IS_ERR_OR_NULL(hwfence_ops)) { + HWFNC_ERR("invalid ops\n"); + return -SYNX_INVALID; + } + + hwfence_ops->uninitialize = synx_hwfence_uninitialize; + hwfence_ops->create = synx_hwfence_create; + hwfence_ops->release = synx_hwfence_release; + hwfence_ops->signal = synx_hwfence_signal; + hwfence_ops->import = synx_hwfence_import; + + return SYNX_SUCCESS; +} +EXPORT_SYMBOL_GPL(synx_hwfence_init_ops); From 40c197ab9ddf71169c5384664e3639f271fb037f Mon Sep 17 00:00:00 2001 From: Grace An Date: Thu, 14 Sep 2023 09:47:34 -0700 Subject: [PATCH 097/166] mm-drivers: hw_fence: add support for synx_hwfence_get_fence This change adds support to get the dma-fence for fences created internally by the hw-fence driver. Change-Id: I5534e4bfd80ace9e58f1b9397a9377c56145146f Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 4 +++- hw_fence/src/msm_hw_fence_synx_translation.c | 21 ++++++++++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 65a98a95fd..bd6980ac66 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef __HW_FENCE_DRV_INTERNAL_H @@ -608,5 +608,7 @@ struct dma_fence *hw_dma_fence_init(struct msm_hw_fence_client *hw_fence_client, u64 seqno); struct dma_fence *hw_fence_internal_dma_fence_create(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 *hash); +struct dma_fence *hw_fence_dma_fence_find(struct hw_fence_driver_data *drv_data, + u64 hash, bool incr_refcount); #endif /* __HW_FENCE_DRV_INTERNAL_H */ diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c index 43e7ea5b72..64f315a4eb 100644 --- a/hw_fence/src/msm_hw_fence_synx_translation.c +++ b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -257,6 +257,26 @@ int synx_hwfence_recover(enum synx_client_id id) } EXPORT_SYMBOL_GPL(synx_hwfence_recover); +static void *synx_hwfence_get_fence(struct synx_session *session, u32 h_synx) +{ + struct dma_fence *fence = NULL; + + if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type)) { + HWFNC_ERR("invalid session:0x%pK synx_id:%d\n", session, + IS_ERR_OR_NULL(session) ? 
-1 : session->type); + return ERR_PTR(-SYNX_INVALID); + } + + fence = hw_fence_dma_fence_find(hw_fence_drv_data, h_synx, true); + + /* add a reference to the dma-fence, this must be released by the caller */ + if (IS_ERR_OR_NULL(fence)) + HWFNC_ERR("synx_id:%d failed to get fence for h_synx:%u ret:%ld\n", session->type, + h_synx, PTR_ERR(fence)); + + return (void *)fence; +} + static int synx_hwfence_import_indv(void *client, struct synx_import_indv_params *params) { u64 handle; @@ -348,6 +368,7 @@ int synx_hwfence_init_ops(struct synx_ops *hwfence_ops) hwfence_ops->release = synx_hwfence_release; hwfence_ops->signal = synx_hwfence_signal; hwfence_ops->import = synx_hwfence_import; + hwfence_ops->get_fence = synx_hwfence_get_fence; return SYNX_SUCCESS; } From b76208ef25e2f1d6f68102372a616c40ce537298 Mon Sep 17 00:00:00 2001 From: Grace An Date: Thu, 8 Jun 2023 11:09:44 -0700 Subject: [PATCH 098/166] mm-drivers: hw_fence: add hwfence implementation of synx_get_status Add API to get status of hw-fence. Update signal implementation to use synx signal statuses. 
Change-Id: Iaaa8e6c952793cda807b4f77d253054caa573fea Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 2 + hw_fence/src/hw_fence_drv_priv.c | 23 ++++- hw_fence/src/msm_hw_fence_synx_translation.c | 90 +++++++++++++++++++- 3 files changed, 110 insertions(+), 5 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index bd6980ac66..4122355fa4 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -602,6 +602,8 @@ struct msm_hw_fence *hw_fence_find_with_dma_fence(struct hw_fence_driver_data *d enum hw_fence_client_data_id hw_fence_get_client_data_id(enum hw_fence_client_id client_id); int hw_fence_signal_fence(struct hw_fence_driver_data *drv_data, struct dma_fence *fence, u64 hash, u32 error, bool release_ref); +int hw_fence_get_flags_error(struct hw_fence_driver_data *drv_data, u64 hash, u64 *flags, + u32 *error); /* apis for internally managed dma-fence */ struct dma_fence *hw_dma_fence_init(struct msm_hw_fence_client *hw_fence_client, u64 context, diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index cb740c828e..6168c31e19 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include @@ -2386,3 +2386,24 @@ int hw_fence_add_callback(struct hw_fence_driver_data *drv_data, struct dma_fenc return ret; } + +int hw_fence_get_flags_error(struct hw_fence_driver_data *drv_data, u64 hash, u64 *flags, + u32 *error) +{ + struct msm_hw_fence *hw_fence; + + if (!drv_data) { + HWFNC_ERR("invalid drv_data\n"); + return -EINVAL; + } + + hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, drv_data->hw_fences_tbl, hash); + if (!hw_fence) { + HWFNC_ERR("Failed to get hw-fence for hash:%llu\n", hash); + return -EINVAL; + } + *flags = hw_fence->flags; + *error = hw_fence->error; + + return 0; +} diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c index 64f315a4eb..1127596724 100644 --- a/hw_fence/src/msm_hw_fence_synx_translation.c +++ b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -96,6 +96,61 @@ static bool is_hw_fence_client(enum synx_client_id synx_client_id) && synx_client_id < SYNX_HW_FENCE_CLIENT_END; } +static u32 _to_synx_signal_status(u32 flags, u32 error) +{ + u32 status; + + if (!(flags & MSM_HW_FENCE_FLAG_SIGNAL)) { + status = SYNX_STATE_ACTIVE; + goto end; + } + + switch (error) { + case 0: + status = SYNX_STATE_SIGNALED_SUCCESS; + break; + case MSM_HW_FENCE_ERROR_RESET: + status = SYNX_STATE_SIGNALED_SSR; + break; + default: + status = error; + break; + } + +end: + HWFNC_DBG_L("fence flags:%u err:%u status:%u\n", flags, error, status); + + return status; +} + +static u32 _to_hwfence_fence_error(u32 status) +{ + u32 error; + + switch (status) { + case SYNX_STATE_INVALID: + HWFNC_ERR("converting error status for invalid fence\n"); + error = SYNX_INVALID; + break; + case SYNX_STATE_ACTIVE: + HWFNC_ERR("converting error status for unsignaled fence\n"); + error = 0; + break; + case SYNX_STATE_SIGNALED_SUCCESS: + error = 0; + break; + case SYNX_STATE_SIGNALED_SSR: + error = MSM_HW_FENCE_ERROR_RESET; + break; + default: + error = status; + break; + } + HWFNC_DBG_L("fence status:%u 
err:%u\n", status, error); + + return error; +} + struct synx_session *synx_hwfence_initialize(struct synx_initialization_params *params) { struct synx_session *session = NULL; @@ -223,15 +278,20 @@ static int synx_hwfence_release(struct synx_session *session, u32 h_synx) static int synx_hwfence_signal(struct synx_session *session, u32 h_synx, enum synx_signal_status status) { + u32 error; int ret; - if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type)) { - HWFNC_ERR("invalid session:0x%pK synx_id:%d\n", session, - IS_ERR_OR_NULL(session) ? -1 : session->type); + if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type) || + !(status == SYNX_STATE_SIGNALED_SUCCESS || + status == SYNX_STATE_SIGNALED_CANCEL || + status > SYNX_STATE_SIGNALED_MAX)) { + HWFNC_ERR("invalid session:0x%pK synx_id:%d status:%u\n", session, + IS_ERR_OR_NULL(session) ? -1 : session->type, status); return -SYNX_INVALID; } - ret = msm_hw_fence_update_txq(session->client, h_synx, 0, (u32)status); + error = _to_hwfence_fence_error(status); + ret = msm_hw_fence_update_txq(session->client, h_synx, 0, error); if (ret) HWFNC_ERR("synx_id:%d failed to signal fence h_synx:%u status:%d ret:%d\n", session->type, h_synx, status, ret); @@ -277,6 +337,27 @@ static void *synx_hwfence_get_fence(struct synx_session *session, u32 h_synx) return (void *)fence; } +static int synx_hwfence_get_status(struct synx_session *session, u32 h_synx) +{ + u64 flags; + u32 error; + int ret; + + if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type)) { + HWFNC_ERR("invalid session:0x%pK synx_id:%d\n", session, + IS_ERR_OR_NULL(session) ? 
-1 : session->type); + return SYNX_STATE_INVALID; + } + + ret = hw_fence_get_flags_error(hw_fence_drv_data, h_synx, &flags, &error); + if (ret) { + HWFNC_ERR("Failed to get status for client:%d h_synx:%u\n", session->type, h_synx); + return SYNX_STATE_INVALID; + } + + return _to_synx_signal_status(flags, error); +} + static int synx_hwfence_import_indv(void *client, struct synx_import_indv_params *params) { u64 handle; @@ -369,6 +450,7 @@ int synx_hwfence_init_ops(struct synx_ops *hwfence_ops) hwfence_ops->signal = synx_hwfence_signal; hwfence_ops->import = synx_hwfence_import; hwfence_ops->get_fence = synx_hwfence_get_fence; + hwfence_ops->get_status = synx_hwfence_get_status; return SYNX_SUCCESS; } From 94a35a9774d86e76bfd91fab8dc6ea190d917ba4 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 24 Oct 2023 11:50:21 -0700 Subject: [PATCH 099/166] mm-drivers: hw_fence: revise hw-fence error message to simplify parsing Current hw-fence error message contains non-contiguous string. This change ensures that hw-fence error string has contiguous string to simplify automated parsing. Change-Id: I07332ada893d0a4d20d7646a0e80b5407cf43cad Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_debug.h | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_debug.h b/hw_fence/include/hw_fence_drv_debug.h index 4f754b7c28..deb8d7943c 100644 --- a/hw_fence/include/hw_fence_drv_debug.h +++ b/hw_fence/include/hw_fence_drv_debug.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef __HW_FENCE_DRV_DEBUG @@ -33,42 +33,42 @@ extern u32 msm_hw_fence_debug_level; #define HWFNC_ERR(fmt, ...) 
\ - pr_err("[hwfence:%s:%d][err][%pS] "fmt, __func__, __LINE__, \ + pr_err("[hwfence_error:%s:%d][%pS] "fmt, __func__, __LINE__, \ __builtin_return_address(0), ##__VA_ARGS__) #define HWFNC_ERR_ONCE(fmt, ...) \ - pr_err_once("[hwfence:%s:%d][err][%pS] "fmt, __func__, __LINE__, \ + pr_err_once("[hwfence_error:%s:%d][%pS] "fmt, __func__, __LINE__, \ __builtin_return_address(0), ##__VA_ARGS__) #define HWFNC_DBG_H(fmt, ...) \ - dprintk(HW_FENCE_HIGH, "[hwfence:%s:%d][dbgh]"fmt, __func__, __LINE__, ##__VA_ARGS__) + dprintk(HW_FENCE_HIGH, "[hwfence_dbgh:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__) #define HWFNC_DBG_L(fmt, ...) \ - dprintk(HW_FENCE_LOW, "[hwfence:%s:%d][dbgl]"fmt, __func__, __LINE__, ##__VA_ARGS__) + dprintk(HW_FENCE_LOW, "[hwfence_dbgl:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__) #define HWFNC_DBG_INFO(fmt, ...) \ - dprintk(HW_FENCE_INFO, "[hwfence:%s:%d][dbgi]"fmt, __func__, __LINE__, ##__VA_ARGS__) + dprintk(HW_FENCE_INFO, "[hwfence_dbgi:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__) #define HWFNC_DBG_INIT(fmt, ...) \ - dprintk(HW_FENCE_INIT, "[hwfence:%s:%d][dbg]"fmt, __func__, __LINE__, ##__VA_ARGS__) + dprintk(HW_FENCE_INIT, "[hwfence_dbg:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__) #define HWFNC_DBG_Q(fmt, ...) \ - dprintk(HW_FENCE_QUEUE, "[hwfence:%s:%d][dbgq]"fmt, __func__, __LINE__, ##__VA_ARGS__) + dprintk(HW_FENCE_QUEUE, "[hwfence_dbgq:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__) #define HWFNC_DBG_LUT(fmt, ...) \ - dprintk(HW_FENCE_LUT, "[hwfence:%s:%d][dbglut]"fmt, __func__, __LINE__, ##__VA_ARGS__) + dprintk(HW_FENCE_LUT, "[hwfence_dbglut:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__) #define HWFNC_DBG_IRQ(fmt, ...) \ - dprintk(HW_FENCE_IRQ, "[hwfence:%s:%d][dbgirq]"fmt, __func__, __LINE__, ##__VA_ARGS__) + dprintk(HW_FENCE_IRQ, "[hwfence_dbgirq:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__) #define HWFNC_DBG_LOCK(fmt, ...) 
\ - dprintk(HW_FENCE_LOCK, "[hwfence:%s:%d][dbglock]"fmt, __func__, __LINE__, ##__VA_ARGS__) + dprintk(HW_FENCE_LOCK, "[hwfence_dbglock:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__) #define HWFNC_DBG_DUMP(prio, fmt, ...) \ - dprintk(prio, "[hwfence:%s:%d][dbgd]"fmt, __func__, __LINE__, ##__VA_ARGS__) + dprintk(prio, "[hwfence_dbgd:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__) #define HWFNC_WARN(fmt, ...) \ - pr_warn("[hwfence:%s:%d][warn][%pS] "fmt, __func__, __LINE__, \ + pr_warn("[hwfence_warn:%s:%d][%pS] "fmt, __func__, __LINE__, \ __builtin_return_address(0), ##__VA_ARGS__) int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data); From dcad47614f0d2a7373b680cf3a3c48804a1ac8f2 Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 28 Jun 2023 14:22:45 -0700 Subject: [PATCH 100/166] mm-drivers: hw_fence: add native fencing inter-op support for synx fences Add support to register for wait on synx fences and update hw-fence table with synx handles when clients import native dma-fences. 
Change-Id: Ifcde93569280c935c9ce0a0619eee21dc0ff01b9 Signed-off-by: Grace An --- hw_fence/define_hw_fence.bzl | 1 + hw_fence/include/hw_fence_drv_interop.h | 64 ++++ hw_fence/include/hw_fence_drv_priv.h | 8 +- hw_fence/src/hw_fence_drv_debug.c | 10 +- hw_fence/src/hw_fence_drv_interop.c | 306 +++++++++++++++++++ hw_fence/src/hw_fence_drv_priv.c | 48 ++- hw_fence/src/msm_hw_fence_synx_translation.c | 150 +++------ 7 files changed, 470 insertions(+), 117 deletions(-) create mode 100644 hw_fence/include/hw_fence_drv_interop.h create mode 100644 hw_fence/src/hw_fence_drv_interop.c diff --git a/hw_fence/define_hw_fence.bzl b/hw_fence/define_hw_fence.bzl index 1598ed183b..d6d674f570 100644 --- a/hw_fence/define_hw_fence.bzl +++ b/hw_fence/define_hw_fence.bzl @@ -8,6 +8,7 @@ def _define_module(target, variant): name = "{}_msm_hw_fence".format(tv), srcs = [ "src/hw_fence_drv_debug.c", + "src/hw_fence_drv_interop.c", "src/hw_fence_drv_ipc.c", "src/hw_fence_drv_priv.c", "src/hw_fence_drv_utils.c", diff --git a/hw_fence/include/hw_fence_drv_interop.h b/hw_fence/include/hw_fence_drv_interop.h new file mode 100644 index 0000000000..c81a97b499 --- /dev/null +++ b/hw_fence/include/hw_fence_drv_interop.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef __HW_FENCE_INTEROP_H +#define __HW_FENCE_INTEROP_H + +#include + +extern struct hw_fence_driver_data *hw_fence_drv_data; +extern struct synx_hwfence_interops synx_interops; + +/** + * hw_fence_interop_to_synx_status() - Converts hw-fence status code to synx status code + * + * @param hw_fence_status_code : hw-fence status code + * @return synx status code corresponding to hw-fence status code + */ +int hw_fence_interop_to_synx_status(int hw_fence_status_code); + +/** + * hw_fence_interop_to_synx_signal_status() - Converts hw-fence flags and error to + * synx signaling status + * + * @param flags : hw-fence flags + * @param error : hw-fence error + * + * @return synx signaling status + */ +u32 hw_fence_interop_to_synx_signal_status(u32 flags, u32 error); + +/** + * hw_fence_interop_to_hw_fence_error() - Convert synx signaling status to hw-fence error + * + * @param status : synx signaling status + * @return hw-fence error + */ +u32 hw_fence_interop_to_hw_fence_error(u32 status); + +/** + * hw_fence_interop_create_fence_from_import() - Creates hw-fence if necessary during synx_import, + * e.g. if there is no backing hw-fence for a synx fence.
+ * + * @param params : pointer to import params + * @return SYNX_SUCCESS upon success, -SYNX_INVALID if failed + */ +int hw_fence_interop_create_fence_from_import(struct synx_import_indv_params *params); + +/** + * hw_fence_interop_share_handle_status() - updates HW fence table with synx handle + * (if not already signaled) and returns hw-fence handle by populating params.new_h_synx + * and returning signal status + * + * @param params : pointer to import params + * @param h_synx : synx handle + * @param signal_status: signaling status of fence + * + * @return SYNX_SUCCESS upon success, -SYNX_INVALID if failed + */ +int hw_fence_interop_share_handle_status(struct synx_import_indv_params *params, u32 h_synx, + u32 *signal_status); + +#endif /* __HW_FENCE_INTEROP_H */ diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 4122355fa4..79effdbff6 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -516,7 +516,7 @@ struct msm_hw_fence_event { * @seq_id: sequence id * @wait_client_mask: bitmask holding the waiting-clients of the fence * @fence_allocator: field to indicate the client_id that reserved the fence - * @fence_signal-client: + * @fence_signal_client: client that signaled the fence * @lock: this field is required to share information between the Driver & Driver || * Driver & FenceCTL. Needs to be 64-bit atomic inter-processor lock. * @flags: field to indicate the state of the fence @@ -530,6 +530,7 @@ struct msm_hw_fence_event { * @refcount: refcount on the hw-fence.
This is split into multiple fields, see * HW_FENCE_HLOS_REFCOUNT_MASK and HW_FENCE_FCTL_REFCOUNT and HW_FENCE_DMA_FENCE_REFCOUNT * for more detail + * @h_synx: synx handle, nonzero if hw-fence is also backed by synx fence * @client_data: array of data optionally passed from and returned to clients waiting on the fence * during fence signaling */ @@ -549,7 +550,8 @@ struct msm_hw_fence { u64 fence_create_time; u64 fence_trigger_time; u64 fence_wait_time; - u64 refcount; + u32 refcount; + u32 h_synx; u64 client_data[HW_FENCE_MAX_CLIENTS_WITH_DATA]; }; @@ -604,6 +606,8 @@ int hw_fence_signal_fence(struct hw_fence_driver_data *drv_data, struct dma_fenc u32 error, bool release_ref); int hw_fence_get_flags_error(struct hw_fence_driver_data *drv_data, u64 hash, u64 *flags, u32 *error); +int hw_fence_update_hsynx(struct hw_fence_driver_data *drv_data, u64 hash, u32 h_synx, + bool wait_for); /* apis for internally managed dma-fence */ struct dma_fence *hw_dma_fence_init(struct msm_hw_fence_client *hw_fence_client, u64 context, diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index 26219f8db4..2dcbe4f2dc 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include @@ -17,7 +17,7 @@ #define HFENCE_TBL_MSG \ "[%d]hfence[%u] v:%d err:%u ctx:%llu seq:%llu wait:0x%llx alloc:%d f:0x%llx child_cnt:%d"\ - "%s ct:%llu tt:%llu wt:%llu ref:0x%llx\n" + "%s ct:%llu tt:%llu wt:%llu ref:0x%x h_synx:%u\n" /* each hwfence parent includes one "32-bit" element + "," separator */ #define HW_FENCE_MAX_PARENTS_SUBLIST_DUMP (MSM_HW_FENCE_MAX_JOIN_PARENTS * 9) @@ -504,7 +504,8 @@ static void _dump_fence_helper(enum hw_fence_drv_prio prio, struct msm_hw_fence count, index, hw_fence->valid, hw_fence->error, hw_fence->ctx_id, hw_fence->seq_id, hw_fence->wait_client_mask, hw_fence->fence_allocator, hw_fence->flags, hw_fence->pending_child_cnt, parents_dump, hw_fence->fence_create_time, - hw_fence->fence_trigger_time, hw_fence->fence_wait_time, hw_fence->refcount); + hw_fence->fence_trigger_time, hw_fence->fence_wait_time, hw_fence->refcount, + hw_fence->h_synx); } void hw_fence_debug_dump_fence(enum hw_fence_drv_prio prio, struct msm_hw_fence *hw_fence, u64 hash, @@ -527,7 +528,8 @@ static inline int _dump_fence(struct msm_hw_fence *hw_fence, char *buf, int len, cnt, index, hw_fence->valid, hw_fence->error, hw_fence->ctx_id, hw_fence->seq_id, hw_fence->wait_client_mask, hw_fence->fence_allocator, hw_fence->flags, hw_fence->pending_child_cnt, parents_dump, hw_fence->fence_create_time, - hw_fence->fence_trigger_time, hw_fence->fence_wait_time, hw_fence->refcount); + hw_fence->fence_trigger_time, hw_fence->fence_wait_time, hw_fence->refcount, + hw_fence->h_synx); return ret; } diff --git a/hw_fence/src/hw_fence_drv_interop.c b/hw_fence/src/hw_fence_drv_interop.c new file mode 100644 index 0000000000..c12f8784fe --- /dev/null +++ b/hw_fence/src/hw_fence_drv_interop.c @@ -0,0 +1,306 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include "msm_hw_fence.h" +#include "hw_fence_drv_priv.h" +#include "hw_fence_drv_debug.h" +#include "hw_fence_drv_interop.h" + +/** + * HW_FENCE_SYNX_FENCE_CLIENT_ID: + * ClientID for fences created to back synx fences + */ +#define HW_FENCE_SYNX_FENCE_CLIENT_ID (~(u32)1) + +struct synx_hwfence_interops synx_interops = { + .share_handle_status = NULL, + .get_fence = NULL, + .notify_recover = NULL, +}; + +int hw_fence_interop_to_synx_status(int hw_fence_status_code) +{ + int synx_status_code; + + switch (hw_fence_status_code) { + case 0: + synx_status_code = SYNX_SUCCESS; + break; + case -ENOMEM: + synx_status_code = -SYNX_NOMEM; + break; + case -EPERM: + synx_status_code = -SYNX_NOPERM; + break; + case -ETIMEDOUT: + synx_status_code = -SYNX_TIMEOUT; + break; + case -EALREADY: + synx_status_code = -SYNX_ALREADY; + break; + case -ENOENT: + synx_status_code = -SYNX_NOENT; + break; + case -EINVAL: + synx_status_code = -SYNX_INVALID; + break; + case -EBUSY: + synx_status_code = -SYNX_BUSY; + break; + default: + synx_status_code = hw_fence_status_code; + break; + } + + return synx_status_code; +} + +u32 hw_fence_interop_to_synx_signal_status(u32 flags, u32 error) +{ + u32 status; + + if (!(flags & MSM_HW_FENCE_FLAG_SIGNAL)) { + status = SYNX_STATE_ACTIVE; + goto end; + } + + switch (error) { + case 0: + status = SYNX_STATE_SIGNALED_SUCCESS; + break; + case MSM_HW_FENCE_ERROR_RESET: + status = SYNX_STATE_SIGNALED_SSR; + break; + default: + status = error; + break; + } + +end: + HWFNC_DBG_L("fence flags:%u err:%u status:%u\n", flags, error, status); + + return status; +} + +u32 hw_fence_interop_to_hw_fence_error(u32 status) +{ + u32 error; + + switch (status) { + case SYNX_STATE_INVALID: + HWFNC_ERR("converting error status for invalid fence\n"); + error = SYNX_INVALID; + break; + case SYNX_STATE_ACTIVE: + HWFNC_ERR("converting error status for unsignaled fence\n"); + error = 0; + break; + case SYNX_STATE_SIGNALED_SUCCESS: + error = 
0; + break; + case SYNX_STATE_SIGNALED_SSR: + error = MSM_HW_FENCE_ERROR_RESET; + break; + default: + error = status; + break; + } + HWFNC_DBG_L("fence status:%u err:%u\n", status, error); + + return error; +} + +static int _update_interop_fence(struct synx_import_indv_params *params, u64 handle) +{ + u32 signal_status; + int ret, error; + + if (!params->new_h_synx || !synx_interops.share_handle_status) { + HWFNC_ERR("invalid new_h_synx:0x%pK share_handle_status:0x%pK\n", + params->new_h_synx, synx_interops.share_handle_status); + return -EINVAL; + } + + ret = synx_interops.share_handle_status(params, handle, &signal_status); + if (ret || signal_status == SYNX_STATE_INVALID) { + HWFNC_ERR("failed to share handle and signal status handle:%llu ret:%d\n", + handle, ret); + /* destroy reference held by signal*/ + hw_fence_destroy_refcount(hw_fence_drv_data, handle, HW_FENCE_FCTL_REFCOUNT); + + return ret; + } + if (signal_status != SYNX_STATE_ACTIVE) { + error = hw_fence_interop_to_hw_fence_error(signal_status); + ret = hw_fence_signal_fence(hw_fence_drv_data, NULL, handle, error, true); + if (ret) { + HWFNC_ERR("Failed to signal hwfence handle:%llu error:%u\n", handle, error); + return ret; + } + } + + /* store h_synx for debugging purposes */ + ret = hw_fence_update_hsynx(hw_fence_drv_data, handle, *params->new_h_synx, false); + if (ret) + HWFNC_ERR("Failed to update hwfence handle:%llu h_synx:%u\n", handle, + *params->new_h_synx); + + return ret; +} + +int hw_fence_interop_create_fence_from_import(struct synx_import_indv_params *params) +{ + struct msm_hw_fence_client interop_client; + struct dma_fence *fence; + int destroy_ret, ret; + unsigned long flags; + u64 handle; + + if (IS_ERR_OR_NULL(params) || IS_ERR_OR_NULL(params->fence)) { + HWFNC_ERR("invalid params:0x%pK fence:0x%pK\n", + params, IS_ERR_OR_NULL(params) ? 
NULL : params->fence); + return -SYNX_INVALID; + } + + fence = (struct dma_fence *)params->fence; + spin_lock_irqsave(fence->lock, flags); + + /* hw-fence already present, so no need to create new hw-fence */ + if (test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) { + spin_unlock_irqrestore(fence->lock, flags); + return SYNX_SUCCESS; + } + + if (!test_bit(SYNX_NATIVE_FENCE_FLAG_ENABLED_BIT, &fence->flags)) { + spin_unlock_irqrestore(fence->lock, flags); + HWFNC_ERR("Cannot import native sw dma-fence ctx:%llu seq:%llu flags:0x%lx\n", + fence->context, fence->seqno, fence->flags); + return -SYNX_INVALID; + } + + interop_client.client_id = HW_FENCE_SYNX_FENCE_CLIENT_ID; + ret = hw_fence_create(hw_fence_drv_data, &interop_client, fence->context, + fence->seqno, &handle); + if (ret) { + HWFNC_ERR("failed to create interop fence client:%d ctx:%llu seq:%llu ret:%d\n", + interop_client.client_id, fence->context, fence->seqno, ret); + spin_unlock_irqrestore(fence->lock, flags); + return hw_fence_interop_to_synx_status(ret); + } + set_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags); + spin_unlock_irqrestore(fence->lock, flags); + + ret = _update_interop_fence(params, handle); + if (ret) { + HWFNC_ERR("failed to exchange interop handles handle:%llu ret:%d\n", handle, ret); + goto error; + } + + ret = hw_fence_add_callback(hw_fence_drv_data, fence, handle); + if (ret) + HWFNC_ERR("failed to add signal callback for interop fence handle:%llu ret:%d\n", + handle, ret); + +error: + /* destroy reference held by creator of fence */ + destroy_ret = hw_fence_destroy_with_hash(hw_fence_drv_data, &interop_client, + handle); + if (destroy_ret) { + HWFNC_ERR("failed destroy interop fence client:%d handle:%llu ret:%d\n", + interop_client.client_id, handle, ret); + ret = destroy_ret; + } + + return hw_fence_interop_to_synx_status(ret); +} + +int hw_fence_interop_share_handle_status(struct synx_import_indv_params *params, u32 h_synx, + u32 *signal_status) +{ + struct msm_hw_fence 
*hw_fence; + int destroy_ret, ret = 0; + struct dma_fence *fence; + u64 flags, handle; + bool is_signaled; + u32 error; + + if (IS_ERR_OR_NULL(params) || IS_ERR_OR_NULL(params->new_h_synx) || + !(params->flags & SYNX_IMPORT_DMA_FENCE) || + (params->flags & SYNX_IMPORT_SYNX_FENCE) || IS_ERR_OR_NULL(params->fence)) { + HWFNC_ERR("invalid params:0x%pK h_synx:0x%pK flags:0x%x fence:0x%pK\n", + params, IS_ERR_OR_NULL(params) ? NULL : params->new_h_synx, + IS_ERR_OR_NULL(params) ? 0 : params->flags, + IS_ERR_OR_NULL(params) ? NULL : params->fence); + return -SYNX_INVALID; + } + fence = params->fence; + if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) { + HWFNC_ERR("invalid hwfence ctx:%llu seqno:%llu flags:%lx\n", fence->context, + fence->seqno, fence->flags); + return -SYNX_INVALID; + } + + hw_fence = hw_fence_find_with_dma_fence(hw_fence_drv_data, NULL, fence, &handle, + &is_signaled, false); + + if (is_signaled) { + *signal_status = dma_fence_get_status(fence); + return SYNX_SUCCESS; + } + if (!hw_fence) { + HWFNC_ERR("failed to find hw-fence for ctx:%llu seq:%llu\n", fence->context, + fence->seqno); + return -SYNX_INVALID; + } + + ret = hw_fence_get_flags_error(hw_fence_drv_data, handle, &flags, &error); + if (ret) { + HWFNC_ERR("Failed to get flags and error hwfence handle:%llu\n", handle); + goto end; + } + + *signal_status = hw_fence_interop_to_synx_signal_status(flags, error); + if (*signal_status >= SYNX_STATE_SIGNALED_SUCCESS) + goto end; + + /* update h_synx to register the synx framework as a waiter on the hw-fence */ + ret = hw_fence_update_hsynx(hw_fence_drv_data, handle, h_synx, true); + if (ret) { + HWFNC_ERR("failed to set h_synx for hw-fence handle:%llu\n", handle); + goto end; + } + *params->new_h_synx = (u32)handle; + +end: + /* release reference held to find hw-fence */ + destroy_ret = hw_fence_destroy_with_hash(hw_fence_drv_data, NULL, handle); + if (destroy_ret) { + HWFNC_ERR("Failed to decrement refcount on hw-fence handle:%llu\n", 
handle); + ret = destroy_ret; + } + + return hw_fence_interop_to_synx_status(ret); +} + +int synx_hwfence_init_interops(struct synx_hwfence_interops *synx_ops, + struct synx_hwfence_interops *hwfence_ops) +{ + if (IS_ERR_OR_NULL(synx_ops) || IS_ERR_OR_NULL(hwfence_ops)) { + HWFNC_ERR("invalid params synx_ops:0x%pK hwfence_ops:0x%pK\n", synx_ops, + hwfence_ops); + return -EINVAL; + } + + synx_interops.share_handle_status = synx_ops->share_handle_status; + synx_interops.get_fence = synx_ops->get_fence; + synx_interops.notify_recover = synx_ops->notify_recover; + hwfence_ops->share_handle_status = hw_fence_interop_share_handle_status; + + return 0; +} +EXPORT_SYMBOL_GPL(synx_hwfence_init_interops); diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 6168c31e19..962d4afe3a 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1018,7 +1018,7 @@ static int _unreserve_hw_fence(struct hw_fence_driver_data *drv_data, hw_fence->valid = 0; } - HWFNC_DBG_LUT("Unreserved fence client:%d ctx:%llu seq:%llu hash:%u refcount:%llx\n", + HWFNC_DBG_LUT("Unreserved fence client:%d ctx:%llu seq:%llu hash:%u refcount:%x\n", client_id, context, seqno, hash, hw_fence->refcount); return 0; @@ -1041,7 +1041,7 @@ int hw_fence_destroy_refcount(struct hw_fence_driver_data *drv_data, u64 hash, u hw_fence->refcount &= ~ref; } else { GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */ - HWFNC_ERR("fence ctx:%llu seq:%llu hash:%llu ref:0x%llx before destroy ref:0x%x\n", + HWFNC_ERR("fence ctx:%llu seq:%llu hash:%llu ref:0x%x before destroy ref:0x%x\n", hw_fence->ctx_id, hw_fence->seq_id, hash, hw_fence->refcount, ref); /* keep hw-fence in table for debugging purposes */ return -EINVAL; @@ -1054,7 +1054,7 @@ int hw_fence_destroy_refcount(struct hw_fence_driver_data *drv_data, u64 hash, u } GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */ - HWFNC_DBG_H("Removed 0x%x refcount on fence hash:%llu 
ref:0x%llx\n", ref, hash, + HWFNC_DBG_H("Removed 0x%x refcount on fence hash:%llu ref:0x%x\n", ref, hash, hw_fence->refcount); return ret; @@ -1100,7 +1100,7 @@ static int _fence_found(struct hw_fence_driver_data *drv_data, */ hw_fence->refcount++; - HWFNC_DBG_LUT("Found fence client:%d ctx:%llu seq:%llu hash:%u ref:0x%llx\n", + HWFNC_DBG_LUT("Found fence client:%d ctx:%llu seq:%llu hash:%u ref:0x%x\n", client_id, context, seqno, hash, hw_fence->refcount); return 0; @@ -1555,7 +1555,7 @@ end: GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */ if (ret) { - HWFNC_ERR("fence client:%d ctx:%llu seq:%llu hash:%llu ref:0x%llx before decr\n", + HWFNC_ERR("fence client:%d ctx:%llu seq:%llu hash:%llu ref:0x%x before decr\n", client_id, hw_fence->ctx_id, hw_fence->seq_id, hash, hw_fence->refcount); return ret; } @@ -2294,8 +2294,8 @@ int hw_fence_signal_fence(struct hw_fence_driver_data *drv_data, struct dma_fenc { struct msm_hw_fence *hw_fence; - if (!drv_data || !fence) { - HWFNC_ERR("bad params drv_data:0x%pK fence:0x%pK\n", drv_data, fence); + if (!drv_data) { + HWFNC_ERR("bad drv_data\n"); return -EINVAL; } @@ -2305,7 +2305,7 @@ int hw_fence_signal_fence(struct hw_fence_driver_data *drv_data, struct dma_fenc return -EINVAL; } - if (hw_fence->ctx_id != fence->context || hw_fence->seq_id != fence->seqno) { + if (fence && (hw_fence->ctx_id != fence->context || hw_fence->seq_id != fence->seqno)) { HWFNC_ERR("invalid hfence hash:%llu ctx:%llu seq:%llu expected ctx:%llu seq:%llu\n", hash, hw_fence->ctx_id, hw_fence->seq_id, fence->context, fence->seqno); return -EINVAL; @@ -2407,3 +2407,35 @@ int hw_fence_get_flags_error(struct hw_fence_driver_data *drv_data, u64 hash, u6 return 0; } + +int hw_fence_update_hsynx(struct hw_fence_driver_data *drv_data, u64 hash, u32 h_synx, + bool wait_for) +{ + struct msm_hw_fence *hw_fence; + int ret = 0; + + hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, drv_data->hw_fences_tbl, hash); + if (!hw_fence) { + 
HWFNC_ERR("Failed to get hw-fence for hash:%llu\n", hash); + return -EINVAL; + } + + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */ + if (hw_fence->h_synx && hw_fence->h_synx != h_synx) { + ret = -EINVAL; + goto error; + } + hw_fence->h_synx = h_synx; + if (wait_for) + hw_fence->fence_wait_time = hw_fence_get_qtime(drv_data); +error: + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */ + + wmb(); /* update table */ + + if (ret) + HWFNC_ERR("setting h_synx:%u for hw-fence hash:%llu with existing h_synx:%u\n", + h_synx, hash, hw_fence->h_synx); + + return ret; +} diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c index 1127596724..5dd88d6cf0 100644 --- a/hw_fence/src/msm_hw_fence_synx_translation.c +++ b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -7,9 +7,11 @@ #include #include #include +#include #include "msm_hw_fence.h" #include "hw_fence_drv_priv.h" #include "hw_fence_drv_debug.h" +#include "hw_fence_drv_interop.h" /** * MAX_SUPPORTED_DPU0: @@ -17,43 +19,6 @@ */ #define MAX_SUPPORTED_DPU0 (HW_FENCE_CLIENT_ID_CTL5 - HW_FENCE_CLIENT_ID_CTL0) -static int to_synx_status(int hw_fence_status_code) -{ - int synx_status_code; - - switch (hw_fence_status_code) { - case 0: - synx_status_code = SYNX_SUCCESS; - break; - case -ENOMEM: - synx_status_code = -SYNX_NOMEM; - break; - case -EPERM: - synx_status_code = -SYNX_NOPERM; - break; - case -ETIMEDOUT: - synx_status_code = -SYNX_TIMEOUT; - break; - case -EALREADY: - synx_status_code = -SYNX_ALREADY; - break; - case -ENOENT: - synx_status_code = -SYNX_NOENT; - break; - case -EINVAL: - synx_status_code = -SYNX_INVALID; - break; - case -EBUSY: - synx_status_code = -SYNX_BUSY; - break; - default: - synx_status_code = hw_fence_status_code; - break; - } - - return synx_status_code; -} - static enum hw_fence_client_id _get_hw_fence_client_id(enum synx_client_id synx_client_id) { enum hw_fence_client_id hw_fence_client_id; @@ -96,61 +61,6 @@ static 
bool is_hw_fence_client(enum synx_client_id synx_client_id) && synx_client_id < SYNX_HW_FENCE_CLIENT_END; } -static u32 _to_synx_signal_status(u32 flags, u32 error) -{ - u32 status; - - if (!(flags & MSM_HW_FENCE_FLAG_SIGNAL)) { - status = SYNX_STATE_ACTIVE; - goto end; - } - - switch (error) { - case 0: - status = SYNX_STATE_SIGNALED_SUCCESS; - break; - case MSM_HW_FENCE_ERROR_RESET: - status = SYNX_STATE_SIGNALED_SSR; - break; - default: - status = error; - break; - } - -end: - HWFNC_DBG_L("fence flags:%u err:%u status:%u\n", flags, error, status); - - return status; -} - -static u32 _to_hwfence_fence_error(u32 status) -{ - u32 error; - - switch (status) { - case SYNX_STATE_INVALID: - HWFNC_ERR("converting error status for invalid fence\n"); - error = SYNX_INVALID; - break; - case SYNX_STATE_ACTIVE: - HWFNC_ERR("converting error status for unsignaled fence\n"); - error = 0; - break; - case SYNX_STATE_SIGNALED_SUCCESS: - error = 0; - break; - case SYNX_STATE_SIGNALED_SSR: - error = MSM_HW_FENCE_ERROR_RESET; - break; - default: - error = status; - break; - } - HWFNC_DBG_L("fence status:%u err:%u\n", status, error); - - return error; -} - struct synx_session *synx_hwfence_initialize(struct synx_initialization_params *params) { struct synx_session *session = NULL; @@ -181,7 +91,7 @@ struct synx_session *synx_hwfence_initialize(struct synx_initialization_params * kfree(session); HWFNC_ERR("failed to initialize synx_id:%d ret:%ld\n", params->id, PTR_ERR(client_handle)); - return ERR_PTR(to_synx_status(PTR_ERR(client_handle))); + return ERR_PTR(hw_fence_interop_to_synx_status(PTR_ERR(client_handle))); } session->client = client_handle; session->type = params->id; @@ -207,7 +117,7 @@ static int synx_hwfence_uninitialize(struct synx_session *session) else kfree(session); - return to_synx_status(ret); + return hw_fence_interop_to_synx_status(ret); } static int synx_hwfence_create(struct synx_session *session, struct synx_create_params *params) @@ -243,7 +153,7 @@ static 
int synx_hwfence_create(struct synx_session *session, struct synx_create_ if (ret) { HWFNC_ERR("synx_id:%d failed create fence:0x%pK flags:0x%x ret:%d\n", session->type, params->fence, params->flags, ret); - return to_synx_status(ret); + return hw_fence_interop_to_synx_status(ret); } if (handle > U32_MAX) { HWFNC_ERR("synx_id:%d fence handle:%llu would overflow h_synx\n", session->type, @@ -272,7 +182,7 @@ static int synx_hwfence_release(struct synx_session *session, u32 h_synx) HWFNC_ERR("synx_id:%d failed to destroy fence h_synx:%u ret:%d\n", session->type, h_synx, ret); - return to_synx_status(ret); + return hw_fence_interop_to_synx_status(ret); } static int synx_hwfence_signal(struct synx_session *session, u32 h_synx, @@ -290,13 +200,13 @@ static int synx_hwfence_signal(struct synx_session *session, u32 h_synx, return -SYNX_INVALID; } - error = _to_hwfence_fence_error(status); + error = hw_fence_interop_to_hw_fence_error(status); ret = msm_hw_fence_update_txq(session->client, h_synx, 0, error); if (ret) HWFNC_ERR("synx_id:%d failed to signal fence h_synx:%u status:%d ret:%d\n", session->type, h_synx, status, ret); - return to_synx_status(ret); + return hw_fence_interop_to_synx_status(ret); } int synx_hwfence_recover(enum synx_client_id id) @@ -313,7 +223,7 @@ int synx_hwfence_recover(enum synx_client_id id) if (ret) HWFNC_ERR("synx_id:%d failed to recover ret:%d\n", id, ret); - return to_synx_status(ret); + return hw_fence_interop_to_synx_status(ret); } EXPORT_SYMBOL_GPL(synx_hwfence_recover); @@ -355,13 +265,15 @@ static int synx_hwfence_get_status(struct synx_session *session, u32 h_synx) return SYNX_STATE_INVALID; } - return _to_synx_signal_status(flags, error); + return hw_fence_interop_to_synx_signal_status(flags, error); } static int synx_hwfence_import_indv(void *client, struct synx_import_indv_params *params) { + struct dma_fence_array *array; + struct dma_fence *fence; u64 handle; - int ret; + int ret, i; if (IS_ERR_OR_NULL(client) || 
IS_ERR_OR_NULL(params) || IS_ERR_OR_NULL(params->new_h_synx) || @@ -374,12 +286,43 @@ static int synx_hwfence_import_indv(void *client, struct synx_import_indv_params return -SYNX_INVALID; } + fence = (struct dma_fence *)params->fence; + array = to_dma_fence_array(fence); + if (array) { + for (i = 0; i < array->num_fences; i++) { + if (dma_fence_is_array(array->fences[i])) { + HWFNC_ERR("nested fence arrays not supported idx:%d fence:0x%pK\n", + i, array->fences[i]); + ret = -SYNX_INVALID; + break; + } + + params->fence = array->fences[i]; + ret = hw_fence_interop_create_fence_from_import(params); + if (ret) { + HWFNC_ERR("failed to back dma_fence_array idx:%d fence:0x%pK\n", + i, array->fences[i]); + params->fence = fence; + break; + } + } + params->fence = fence; + } else { + ret = hw_fence_interop_create_fence_from_import(params); + } + + if (ret) { + HWFNC_ERR("failed to back dma-fence:0x%pK with hw-fence(s) ret:%d\n", + params->fence, ret); + return ret; + } + ret = msm_hw_fence_wait_update_v2(client, (struct dma_fence **)¶ms->fence, &handle, NULL, 1, true); if (ret) { HWFNC_ERR("failed to import fence:0x%pK flags:0x%x ret:%d\n", params->fence, params->flags, ret); - return to_synx_status(ret); + goto error; } if (handle > U32_MAX) { HWFNC_ERR("fence handle:%llu would overflow new_h_synx\n", handle); @@ -389,7 +332,8 @@ static int synx_hwfence_import_indv(void *client, struct synx_import_indv_params } *params->new_h_synx = handle; - return SYNX_SUCCESS; +error: + return hw_fence_interop_to_synx_status(ret); } static int synx_hwfence_import_arr(void *client, struct synx_import_arr_params *params) From 8c0170c7b1ca94147476253621c3678cf2e29aee Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 15 Aug 2023 13:17:47 -0700 Subject: [PATCH 101/166] mm-drivers: hw_fence: add handle-based interop support Add support for hw fence client to import synx handle and for synx client to import hw-fence handle. 
Change-Id: I46bf63b6cef6e01244d3bd0a2b366f856ee0dc1d Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_interop.h | 14 +++ hw_fence/src/hw_fence_drv_interop.c | 21 ++++ hw_fence/src/msm_hw_fence_synx_translation.c | 105 +++++++++++++------ 3 files changed, 107 insertions(+), 33 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_interop.h b/hw_fence/include/hw_fence_drv_interop.h index c81a97b499..a56dfbc89e 100644 --- a/hw_fence/include/hw_fence_drv_interop.h +++ b/hw_fence/include/hw_fence_drv_interop.h @@ -11,6 +11,11 @@ extern struct hw_fence_driver_data *hw_fence_drv_data; extern struct synx_hwfence_interops synx_interops; +/** + * HW_FENCE_HANDLE_INDEX_MASK: Mask to extract table index from hw-fence handle + */ +#define HW_FENCE_HANDLE_INDEX_MASK GENMASK(16, 0) + /** * hw_fence_interop_to_synx_status() - Converts hw-fence status code to synx status code * @@ -61,4 +66,13 @@ int hw_fence_interop_create_fence_from_import(struct synx_import_indv_params *pa int hw_fence_interop_share_handle_status(struct synx_import_indv_params *params, u32 h_synx, u32 *signal_status); +/** + * hw_fence_interop_get_fence() – return the dma-fence associated with the given handle + * + * @param h_synx : hw-fence handle + * + * @return dma-fence associated with hw-fence handle. Null or error pointer in case of error. 
+ */ +void *hw_fence_interop_get_fence(u32 h_synx); + #endif /* __HW_FENCE_INTEROP_H */ diff --git a/hw_fence/src/hw_fence_drv_interop.c b/hw_fence/src/hw_fence_drv_interop.c index c12f8784fe..496036e88f 100644 --- a/hw_fence/src/hw_fence_drv_interop.c +++ b/hw_fence/src/hw_fence_drv_interop.c @@ -287,6 +287,26 @@ end: return hw_fence_interop_to_synx_status(ret); } +void *hw_fence_interop_get_fence(u32 h_synx) +{ + struct dma_fence *fence; + + if (!(h_synx & SYNX_HW_FENCE_HANDLE_FLAG)) { + HWFNC_ERR("invalid h_synx:%u does not have hw-fence handle bit set:%lu\n", + h_synx, SYNX_HW_FENCE_HANDLE_FLAG); + return ERR_PTR(-SYNX_INVALID); + } + + h_synx &= HW_FENCE_HANDLE_INDEX_MASK; + fence = hw_fence_dma_fence_find(hw_fence_drv_data, h_synx, true); + if (!fence) { + HWFNC_ERR("failed to find dma-fence for hw-fence idx:%u\n", h_synx); + return ERR_PTR(-SYNX_INVALID); + } + + return (void *)fence; +} + int synx_hwfence_init_interops(struct synx_hwfence_interops *synx_ops, struct synx_hwfence_interops *hwfence_ops) { @@ -300,6 +320,7 @@ int synx_hwfence_init_interops(struct synx_hwfence_interops *synx_ops, synx_interops.get_fence = synx_ops->get_fence; synx_interops.notify_recover = synx_ops->notify_recover; hwfence_ops->share_handle_status = hw_fence_interop_share_handle_status; + hwfence_ops->get_fence = hw_fence_interop_get_fence; return 0; } diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c index 5dd88d6cf0..cdec5989e2 100644 --- a/hw_fence/src/msm_hw_fence_synx_translation.c +++ b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -162,7 +162,7 @@ static int synx_hwfence_create(struct synx_session *session, struct synx_create_ msm_hw_fence_destroy_with_handle(session->client, handle); return -SYNX_INVALID; } - *params->h_synx = handle; + *params->h_synx = SYNX_HW_FENCE_HANDLE_FLAG | handle; return SYNX_SUCCESS; } @@ -171,12 +171,14 @@ static int synx_hwfence_release(struct synx_session *session, u32 h_synx) { int 
ret; - if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type)) { + if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type) || + !(h_synx & SYNX_HW_FENCE_HANDLE_FLAG)) { HWFNC_ERR("invalid session:0x%pK synx_id:%d\n", session, IS_ERR_OR_NULL(session) ? -1 : session->type); return -SYNX_INVALID; } + h_synx &= HW_FENCE_HANDLE_INDEX_MASK; ret = msm_hw_fence_destroy_with_handle(session->client, h_synx); if (ret) HWFNC_ERR("synx_id:%d failed to destroy fence h_synx:%u ret:%d\n", session->type, @@ -192,15 +194,17 @@ static int synx_hwfence_signal(struct synx_session *session, u32 h_synx, int ret; if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type) || + !(h_synx & SYNX_HW_FENCE_HANDLE_FLAG) || !(status == SYNX_STATE_SIGNALED_SUCCESS || status == SYNX_STATE_SIGNALED_CANCEL || status > SYNX_STATE_SIGNALED_MAX)) { - HWFNC_ERR("invalid session:0x%pK synx_id:%d status:%u\n", session, - IS_ERR_OR_NULL(session) ? -1 : session->type, status); + HWFNC_ERR("invalid session:0x%pK synx_id:%d h_synx:%u status:%u\n", session, + IS_ERR_OR_NULL(session) ? -1 : session->type, h_synx, status); return -SYNX_INVALID; } error = hw_fence_interop_to_hw_fence_error(status); + h_synx &= HW_FENCE_HANDLE_INDEX_MASK; ret = msm_hw_fence_update_txq(session->client, h_synx, 0, error); if (ret) HWFNC_ERR("synx_id:%d failed to signal fence h_synx:%u status:%d ret:%d\n", @@ -229,22 +233,14 @@ EXPORT_SYMBOL_GPL(synx_hwfence_recover); static void *synx_hwfence_get_fence(struct synx_session *session, u32 h_synx) { - struct dma_fence *fence = NULL; - - if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type)) { - HWFNC_ERR("invalid session:0x%pK synx_id:%d\n", session, - IS_ERR_OR_NULL(session) ? -1 : session->type); + if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type) || + !(h_synx & SYNX_HW_FENCE_HANDLE_FLAG)) { + HWFNC_ERR("invalid session:0x%pK synx_id:%d h_synx:%u\n", session, + IS_ERR_OR_NULL(session) ? 
-1 : session->type, h_synx); return ERR_PTR(-SYNX_INVALID); } - fence = hw_fence_dma_fence_find(hw_fence_drv_data, h_synx, true); - - /* add a reference to the dma-fence, this must be released by the caller */ - if (IS_ERR_OR_NULL(fence)) - HWFNC_ERR("synx_id:%d failed to get fence for h_synx:%u ret:%ld\n", session->type, - h_synx, PTR_ERR(fence)); - - return (void *)fence; + return (void *)hw_fence_interop_get_fence(h_synx); } static int synx_hwfence_get_status(struct synx_session *session, u32 h_synx) @@ -253,12 +249,14 @@ static int synx_hwfence_get_status(struct synx_session *session, u32 h_synx) u32 error; int ret; - if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type)) { - HWFNC_ERR("invalid session:0x%pK synx_id:%d\n", session, - IS_ERR_OR_NULL(session) ? -1 : session->type); + if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type) || + !(h_synx & SYNX_HW_FENCE_HANDLE_FLAG)) { + HWFNC_ERR("invalid session:0x%pK synx_id:%d h_synx:%u\n", session, + IS_ERR_OR_NULL(session) ? 
-1 : session->type, h_synx); return SYNX_STATE_INVALID; } + h_synx &= HW_FENCE_HANDLE_INDEX_MASK; ret = hw_fence_get_flags_error(hw_fence_drv_data, h_synx, &flags, &error); if (ret) { HWFNC_ERR("Failed to get status for client:%d h_synx:%u\n", session->type, h_synx); @@ -268,24 +266,13 @@ static int synx_hwfence_get_status(struct synx_session *session, u32 h_synx) return hw_fence_interop_to_synx_signal_status(flags, error); } -static int synx_hwfence_import_indv(void *client, struct synx_import_indv_params *params) +static int synx_hwfence_import_fence(void *client, struct synx_import_indv_params *params) { struct dma_fence_array *array; struct dma_fence *fence; u64 handle; int ret, i; - if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(params) || - IS_ERR_OR_NULL(params->new_h_synx) || - !(params->flags & SYNX_IMPORT_DMA_FENCE) || - (params->flags & SYNX_IMPORT_SYNX_FENCE) || IS_ERR_OR_NULL(params->fence)) { - HWFNC_ERR("invalid client:0x%pK params:0x%pK h_synx:0x%pK flags:0x%x fence:0x%pK\n", - client, params, IS_ERR_OR_NULL(params) ? NULL : params->new_h_synx, - IS_ERR_OR_NULL(params) ? 0 : params->flags, - IS_ERR_OR_NULL(params) ? 
NULL : params->fence); - return -SYNX_INVALID; - } - fence = (struct dma_fence *)params->fence; array = to_dma_fence_array(fence); if (array) { @@ -330,12 +317,64 @@ static int synx_hwfence_import_indv(void *client, struct synx_import_indv_params NULL, 1, false); return -SYNX_INVALID; } - *params->new_h_synx = handle; + *params->new_h_synx = SYNX_HW_FENCE_HANDLE_FLAG | handle; error: return hw_fence_interop_to_synx_status(ret); } +static int synx_hwfence_import_handle(void *client, struct synx_import_indv_params *params) +{ + struct synx_import_indv_params fence_params; + u32 h_synx; + int ret; + + if (!synx_interops.get_fence) { + HWFNC_ERR("invalid synx_get_fence:0x%pK\n", synx_interops.get_fence); + return -SYNX_INVALID; + } + h_synx = *(u32 *)params->fence; + if (h_synx & SYNX_HW_FENCE_HANDLE_FLAG) + fence_params.fence = hw_fence_interop_get_fence(h_synx); + else + fence_params.fence = synx_interops.get_fence(h_synx); + if (IS_ERR_OR_NULL(fence_params.fence)) { + HWFNC_ERR("failed to get native fence h_synx:%u ret:0x%pK\n", h_synx, + fence_params.fence); + return -SYNX_INVALID; + } + fence_params.new_h_synx = params->new_h_synx; + fence_params.flags = SYNX_IMPORT_DMA_FENCE; + ret = synx_hwfence_import_fence(client, &fence_params); + dma_fence_put(fence_params.fence); /* release dma-fence ref acquired by get_fence */ + + return ret; +} + +static int synx_hwfence_import_indv(void *client, struct synx_import_indv_params *params) +{ + if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(params) || + IS_ERR_OR_NULL(params->new_h_synx) || + !((params->flags & SYNX_IMPORT_DMA_FENCE) || + (params->flags & SYNX_IMPORT_SYNX_FENCE)) || + IS_ERR_OR_NULL(params->fence)) { + HWFNC_ERR("invalid client:0x%pK params:0x%pK h_synx:0x%pK flags:0x%x fence:0x%pK\n", + client, params, IS_ERR_OR_NULL(params) ? NULL : params->new_h_synx, + IS_ERR_OR_NULL(params) ? 0 : params->flags, + IS_ERR_OR_NULL(params) ? 
NULL : params->fence); + return -SYNX_INVALID; + } + + if (params->flags & SYNX_IMPORT_DMA_FENCE) + return synx_hwfence_import_fence(client, params); + else if (params->flags & SYNX_IMPORT_SYNX_FENCE) + return synx_hwfence_import_handle(client, params); + + HWFNC_ERR("invalid import flags:0x%x\n", params->flags); + + return -SYNX_INVALID; +} + static int synx_hwfence_import_arr(void *client, struct synx_import_arr_params *params) { int i, ret; From 629f2d0d5d8a8e34b8d58fbbe70d8424e0ded23a Mon Sep 17 00:00:00 2001 From: Grace An Date: Thu, 20 Jul 2023 13:50:03 -0700 Subject: [PATCH 102/166] mm-drivers: hw-fence: support hw-fence import of native dma-fence Add support for HW Fence import by a consumer of a dma-fence from a native dma-fence producer. Change-Id: I58f446211a603b43c5ecb4c24d83bddc00f5f40e Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_interop.c | 49 ++++++++++++++++++----------- 1 file changed, 30 insertions(+), 19 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_interop.c b/hw_fence/src/hw_fence_drv_interop.c index 496036e88f..c0a4601d9b 100644 --- a/hw_fence/src/hw_fence_drv_interop.c +++ b/hw_fence/src/hw_fence_drv_interop.c @@ -17,6 +17,12 @@ */ #define HW_FENCE_SYNX_FENCE_CLIENT_ID (~(u32)1) +/** + * HW_FENCE_SYNX_FENCE_CLIENT_ID: + * ClientID for fences created to back fences with native dma-fence producers + */ +#define HW_FENCE_NATIVE_FENCE_CLIENT_ID (~(u32)2) + struct synx_hwfence_interops synx_interops = { .share_handle_status = NULL, .get_fence = NULL, @@ -155,10 +161,11 @@ static int _update_interop_fence(struct synx_import_indv_params *params, u64 han int hw_fence_interop_create_fence_from_import(struct synx_import_indv_params *params) { - struct msm_hw_fence_client interop_client; + struct msm_hw_fence_client dummy_client; struct dma_fence *fence; int destroy_ret, ret; unsigned long flags; + bool is_synx; u64 handle; if (IS_ERR_OR_NULL(params) || IS_ERR_OR_NULL(params->fence)) { @@ -175,44 +182,48 @@ int 
hw_fence_interop_create_fence_from_import(struct synx_import_indv_params *pa spin_unlock_irqrestore(fence->lock, flags); return SYNX_SUCCESS; } + is_synx = test_bit(SYNX_NATIVE_FENCE_FLAG_ENABLED_BIT, &fence->flags); - if (!test_bit(SYNX_NATIVE_FENCE_FLAG_ENABLED_BIT, &fence->flags)) { - spin_unlock_irqrestore(fence->lock, flags); - HWFNC_ERR("Cannot import native sw dma-fence ctx:%llu seq:%llu flags:0x%lx\n", - fence->context, fence->seqno, fence->flags); - return -SYNX_INVALID; - } - - interop_client.client_id = HW_FENCE_SYNX_FENCE_CLIENT_ID; - ret = hw_fence_create(hw_fence_drv_data, &interop_client, fence->context, + /* only synx clients can signal synx fences; no one can signal sw dma-fence from fw */ + dummy_client.client_id = is_synx ? HW_FENCE_SYNX_FENCE_CLIENT_ID : + HW_FENCE_NATIVE_FENCE_CLIENT_ID; + ret = hw_fence_create(hw_fence_drv_data, &dummy_client, fence->context, fence->seqno, &handle); if (ret) { - HWFNC_ERR("failed to create interop fence client:%d ctx:%llu seq:%llu ret:%d\n", - interop_client.client_id, fence->context, fence->seqno, ret); + HWFNC_ERR("failed create fence client:%d ctx:%llu seq:%llu is_synx:%s ret:%d\n", + dummy_client.client_id, fence->context, fence->seqno, + is_synx ? 
"true" : "false", ret); spin_unlock_irqrestore(fence->lock, flags); return hw_fence_interop_to_synx_status(ret); } set_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags); spin_unlock_irqrestore(fence->lock, flags); - ret = _update_interop_fence(params, handle); + if (is_synx) + /* exchange handles and register fence controller for wait on synx fence */ + ret = _update_interop_fence(params, handle); + else + /* native dma-fences do not have a signaling client, remove ref for fctl signal */ + ret = hw_fence_destroy_refcount(hw_fence_drv_data, handle, HW_FENCE_FCTL_REFCOUNT); + if (ret) { - HWFNC_ERR("failed to exchange interop handles handle:%llu ret:%d\n", handle, ret); + HWFNC_ERR("failed to update for signaling client handle:%llu is_synx:%s ret:%d\n", + handle, is_synx ? "true" : "false", ret); goto error; } ret = hw_fence_add_callback(hw_fence_drv_data, fence, handle); if (ret) - HWFNC_ERR("failed to add signal callback for interop fence handle:%llu ret:%d\n", - handle, ret); + HWFNC_ERR("failed to add signal callback for fence handle:%llu is_synx:%s ret:%d\n", + handle, is_synx ? "true" : "false", ret); error: /* destroy reference held by creator of fence */ - destroy_ret = hw_fence_destroy_with_hash(hw_fence_drv_data, &interop_client, + destroy_ret = hw_fence_destroy_with_hash(hw_fence_drv_data, &dummy_client, handle); if (destroy_ret) { - HWFNC_ERR("failed destroy interop fence client:%d handle:%llu ret:%d\n", - interop_client.client_id, handle, ret); + HWFNC_ERR("failed destroy fence client:%d handle:%llu is_synx:%s ret:%d\n", + dummy_client.client_id, handle, is_synx ? 
"true" : "false", ret); ret = destroy_ret; } From 968664da7db980cd1f95da9729954f606b35dfa3 Mon Sep 17 00:00:00 2001 From: Grace An Date: Thu, 10 Aug 2023 16:18:16 -0700 Subject: [PATCH 103/166] mm-drivers: hw_fence: use different lock implementation for soccp On targets with fencing workload running on a secondary vm, the inter-vm lock uses two bits to distinguish lock owner between hw-fence driver and vm. If the vm fails to acquire an inter-vm lock, then it will go to sleep. Upon unlocking the contended inter-vm lock, the hlos must wake up the vm via an ipcc signal. This change adds lock implementation for targets with soccp: For inter- processor lock with soccp, only one bit is needed for lock and unlock, and ipcc signal should not be triggered for unlock. Change-Id: I910dbe16237196621a25fe9e70bfd2d4bbce5674 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 4 ++ hw_fence/src/hw_fence_drv_utils.c | 57 +++++++++++++++++++++++++--- 2 files changed, 56 insertions(+), 5 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 79effdbff6..d101f2f18f 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -375,6 +375,7 @@ struct hw_fence_signal_cb { * @ipcc_dpu_initialized: flag to indicate if dpu hw is initialized * @dma_fence_table_lock: lock to synchronize access to dma-fence table * @dma_fence_table: table with internal dma-fences for hw-fences + * @has_soccp: flag to indicate if soccp is present (otherwise vm is used) */ struct hw_fence_driver_data { @@ -460,6 +461,9 @@ struct hw_fence_driver_data { spinlock_t dma_fence_table_lock; /* table with internal dma-fences created by the this driver on client's behalf */ DECLARE_HASHTABLE(dma_fence_table, DMA_FENCE_HASH_TABLE_BIT); + + /* soccp is present */ + bool has_soccp; }; /** diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 261fce973d..63871f227b 100644 --- 
a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -140,7 +140,7 @@ struct hw_fence_client_type_desc hw_fence_client_types[HW_FENCE_MAX_CLIENT_TYPE] true}, }; -static void _lock(uint64_t *wait) +static void _lock_vm(uint64_t *wait) { #if defined(__aarch64__) __asm__( @@ -156,10 +156,12 @@ static void _lock(uint64_t *wait) : : [i_lock] "r" (wait) : "memory"); +#elif + HWFNC_ERR("cannot lock\n"); #endif } -static void _unlock(struct hw_fence_driver_data *drv_data, uint64_t *lock) +static void _unlock_vm(struct hw_fence_driver_data *drv_data, uint64_t *lock) { uint64_t lock_val; @@ -174,6 +176,8 @@ static void _unlock(struct hw_fence_driver_data *drv_data, uint64_t *lock) : : [i_out] "r" (lock) : "memory"); +#elif + HWFNC_ERR("cannot unlock\n"); #endif mb(); /* Make sure the memory is updated */ @@ -195,13 +199,52 @@ static void _unlock(struct hw_fence_driver_data *drv_data, uint64_t *lock) } } +static void _lock_soccp(uint64_t *wait) +{ + /* Wait (without WFE) */ +#if defined(__aarch64__) + __asm__("SEVL\n\t" + "PRFM PSTL1KEEP, [%x[i_lock]]\n\t" + "1:\n\t" + "LDAXR W5, [%x[i_lock]]\n\t" + "CBNZ W5, 1b\n\t" + "STXR W5, W0, [%x[i_lock]]\n\t" + "CBNZ W5, 1b\n" + : + : [i_lock] "r" (wait) + : "memory"); +#elif + HWFNC_ERR("cannot lock\n"); +#endif +} + +static void _unlock_soccp(uint64_t *lock) +{ + /* Signal Client */ +#if defined(__aarch64__) + __asm__("STLR WZR, [%x[i_out]]\n\t" + "SEV\n" + : + : [i_out] "r" (lock) + : "memory"); +#elif + HWFNC_ERR("cannot unlock\n"); +#endif +} + void global_atomic_store(struct hw_fence_driver_data *drv_data, uint64_t *lock, bool val) { if (val) { preempt_disable(); - _lock(lock); + if (drv_data->has_soccp) + _lock_soccp(lock); + else + _lock_vm(lock); } else { - 
_unlock(drv_data, lock); + if (drv_data->has_soccp) + _unlock_soccp(lock); + else + _unlock_vm(drv_data, lock); preempt_enable(); } } @@ -997,6 +1040,9 @@ int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data) if (!drv_data->clients) return -ENOMEM; + /* check presence of soccp */ + drv_data->has_soccp = of_property_read_bool(drv_data->dev->of_node, "soccp_controller"); + HWFNC_DBG_INIT("table: entries=%u mem_size=%u queue: entries=%u\b", drv_data->hw_fence_table_entries, drv_data->hw_fence_mem_fences_table_size, drv_data->hw_fence_queue_entries); @@ -1004,6 +1050,7 @@ int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data) drv_data->hw_fence_ctrl_queue_size, drv_data->hw_fence_mem_ctrl_queues_size); HWFNC_DBG_INIT("clients_num: %u, total_mem_size:%u\n", drv_data->clients_num, drv_data->used_mem_size); + HWFNC_DBG_INIT("has_soccp:%s\n", drv_data->has_soccp ? "true" : "false"); return 0; } From 52f9541390a38b1b5962fcaaa195d005cb2e81f6 Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 20 Sep 2023 13:27:21 -0700 Subject: [PATCH 104/166] mm-drivers: hw_fence: add ipcc support for targets with soccp This change updates ipcc initialization to enable send and receive signals related to the ctrl queues, dpu, and validation clients on targets with soccp. 
Change-Id: I0c5bc23fdbe03df8ee8bdf54f2de6d22246b862f Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_ipc.h | 49 ++++- hw_fence/include/hw_fence_drv_priv.h | 13 +- hw_fence/include/hw_fence_drv_utils.h | 24 ++- hw_fence/src/hw_fence_drv_debug.c | 2 +- hw_fence/src/hw_fence_drv_ipc.c | 286 ++++++++++++++++++++------ hw_fence/src/hw_fence_drv_priv.c | 32 ++- hw_fence/src/hw_fence_drv_utils.c | 114 ++++++---- hw_fence/src/msm_hw_fence.c | 20 +- 8 files changed, 412 insertions(+), 128 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_ipc.h b/hw_fence/include/hw_fence_drv_ipc.h index 93bafd1e93..610f71469a 100644 --- a/hw_fence/include/hw_fence_drv_ipc.h +++ b/hw_fence/include/hw_fence_drv_ipc.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef __HW_FENCE_DRV_IPC_H @@ -12,6 +12,7 @@ #define HW_FENCE_IPC_CLIENT_ID_IPE_VID 11 #define HW_FENCE_IPC_CLIENT_ID_VPU_VID 12 #define HW_FENCE_IPC_CLIENT_ID_DPU_VID 25 +#define HW_FENCE_IPC_CLIENT_ID_SOCCP_VID 46 #define HW_FENCE_IPC_CLIENT_ID_IFE0_VID 128 #define HW_FENCE_IPC_CLIENT_ID_IFE1_VID 129 #define HW_FENCE_IPC_CLIENT_ID_IFE2_VID 130 @@ -35,19 +36,29 @@ #define HW_FENCE_IPC_CLIENT_ID_IFE5_PID 16 #define HW_FENCE_IPC_CLIENT_ID_IFE6_PID 17 #define HW_FENCE_IPC_CLIENT_ID_IFE7_PID 18 +#define HW_FENCE_IPC_CLIENT_ID_SOCCP_PID 22 + +/* ipc clients physical client-id on other targets */ +#define HW_FENCE_IPC_CLIENT_ID_IPE_PID_SUN 9 +#define HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN 20 #define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_KALAMA 2 #define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_PINEAPPLE 2 #define HW_FENCE_IPC_FENCE_PROTOCOL_ID_PINEAPPLE 4 +#define HW_FENCE_IPC_FENCE_PROTOCOL_ID_SUN 4 #define HW_FENCE_IPCC_HW_REV_170 0x00010700 /* Kalama */ #define HW_FENCE_IPCC_HW_REV_203 0x00020003 /* Pineapple */ +#define HW_FENCE_IPCC_HW_REV_2A2 
0x00020A02 /* Sun */ #define IPC_PROTOCOLp_CLIENTc_VERSION(base, p, c) (base + (0x40000*p) + (0x1000*c)) #define IPC_PROTOCOLp_CLIENTc_CONFIG(base, p, c) (base + 0x8 + (0x40000*p) + (0x1000*c)) #define IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_ENABLE(base, p, c) \ (base + 0x14 + (0x40000*p) + (0x1000*c)) #define IPC_PROTOCOLp_CLIENTc_SEND(base, p, c) ((base + 0xc) + (0x40000*p) + (0x1000*c)) +#define IPC_PROTOCOLp_CLIENTc_RECV_ID(base, p, c) (base + 0x10 + (0x40000*p) + (0x1000*c)) +#define IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_CLEAR(base, p, c) (base + 0x1C + (0x40000*p) + (0x1000*c)) +#define HW_FENCE_IPC_RECV_ID_NONE 0xFFFFFFFF /** * hw_fence_ipcc_trigger_signal() - Trigger ipc signal for the requested client/signal pair. @@ -70,12 +81,33 @@ void hw_fence_ipcc_trigger_signal(struct hw_fence_driver_data *drv_data, int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data); /** - * hw_fence_ipcc_enable_dpu_signaling() - Enable ipcc signaling for dpu client. - * @drv_data: driver data. + * hw_fence_ipcc_enable_protocol() - Enable ipcc protocol used for hw-fencing + * (either compute l1 or fence depending on target) for given client. + * @drv_data: driver data + * @client_id: hw fence driver client id + * + * This should only be called once for each IPCC client, e.g. if protocol is enabled + * for one dpu client, it should not be called again for another dpu client. * * Return: 0 on success or negative errno (-EINVAL) */ -int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data); +int hw_fence_ipcc_enable_protocol(struct hw_fence_driver_data *drv_data, u32 client_id); + +/** + * hw_fence_ipcc_enable_client_signal_pairs() - Enable ipcc signaling for all client-signal + * pairs required for hw-fencing for given client. + * @drv_data: driver data. + * @start_client: first hw fence driver client id for given ipcc client + * + * This API enables input signal from driver and fctl (if fctl is separate from driver) for + * given client. 
IPCC protocol must be enabled via hw_fence_ipcc_enable_protocol() prior + * to this call. This API iterates through driver's ipc client table to ensure all client- + * signal pairs for given client are enabled. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int hw_fence_ipcc_enable_client_signal_pairs(struct hw_fence_driver_data *drv_data, + u32 start_client); /** * hw_fence_ipcc_get_client_virt_id() - Returns the ipc client virtual id that corresponds to the @@ -132,4 +164,13 @@ bool hw_fence_ipcc_needs_rxq_update(struct hw_fence_driver_data *drv_data, int c */ bool hw_fence_ipcc_needs_ipc_irq(struct hw_fence_driver_data *drv_data, int client_id); +/** + * hw_fence_ipcc_get_signaled_clients_mask() - Returns mask to indicate signals for which clients + * were received by HW Fence Driver + * @drv_data: driver_data + * + * Return: mask on success or zero upon error + */ +u64 hw_fence_ipcc_get_signaled_clients_mask(struct hw_fence_driver_data *drv_data); + #endif /* __HW_FENCE_DRV_IPC_H */ diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index d101f2f18f..741318fb69 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -364,6 +364,8 @@ struct hw_fence_signal_cb { * @protocol_id: ipcc protocol id used by this driver * @ipcc_client_vid: ipcc client virtual-id for this driver * @ipcc_client_pid: ipcc client physical-id for this driver + * @ipcc_fctl_vid: ipcc client virtual-id for fctl + * @ipcc_fctl_pid: ipcc client physical-id for fctl * @ipc_clients_table: table with the ipcc mapping for each client of this driver * @qtime_reg_base: qtimer register base address * @qtime_io_mem: qtimer io mem map @@ -371,8 +373,9 @@ struct hw_fence_signal_cb { * @client_id_mask: bitmask for tracking registered client_ids * @clients_register_lock: lock to synchronize clients registration and deregistration * @clients: table with the handles of the registered clients; size is equal to 
clients_num - * @vm_ready: flag to indicate if vm has been initialized + * @fctl_ready: flag to indicate if fence controller has been initialized * @ipcc_dpu_initialized: flag to indicate if dpu hw is initialized + * @ipcc_val_initialized: flag to indicate if val is initialized * @dma_fence_table_lock: lock to synchronize access to dma-fence table * @dma_fence_table: table with internal dma-fences for hw-fences * @has_soccp: flag to indicate if soccp is present (otherwise vm is used) @@ -439,6 +442,8 @@ struct hw_fence_driver_data { u32 protocol_id; u32 ipcc_client_vid; u32 ipcc_client_pid; + u32 ipcc_fctl_vid; + u32 ipcc_fctl_pid; /* table with mapping of ipc client for each hw-fence client */ struct hw_fence_client_ipc_map *ipc_clients_table; @@ -454,10 +459,14 @@ struct hw_fence_driver_data { /* table with registered client handles */ struct msm_hw_fence_client **clients; - bool vm_ready; + bool fctl_ready; /* state variables */ bool ipcc_dpu_initialized; +#if IS_ENABLED(CONFIG_DEBUG_FS) + bool ipcc_val_initialized; +#endif /* CONFIG_DEBUG_FS */ + spinlock_t dma_fence_table_lock; /* table with internal dma-fences created by the this driver on client's behalf */ DECLARE_HASHTABLE(dma_fence_table, DMA_FENCE_HASH_TABLE_BIT); diff --git a/hw_fence/include/hw_fence_drv_utils.h b/hw_fence/include/hw_fence_drv_utils.h index 29c0f343e8..a6e1721658 100644 --- a/hw_fence/include/hw_fence_drv_utils.h +++ b/hw_fence/include/hw_fence_drv_utils.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef __HW_FENCE_DRV_UTILS_H @@ -49,7 +49,7 @@ enum hw_fence_mem_reserve { void global_atomic_store(struct hw_fence_driver_data *drv_data, uint64_t *lock, bool val); /** - * hw_fence_utils_init_virq() - Initialilze doorbell (i.e. 
vIRQ) for SVM to HLOS signaling + * hw_fence_utils_init_virq() - Initialize doorbell (i.e. vIRQ) for SVM to HLOS signaling * @drv_data: hw fence driver data * * Returns zero if success, otherwise returns negative error code. @@ -57,12 +57,22 @@ void global_atomic_store(struct hw_fence_driver_data *drv_data, uint64_t *lock, int hw_fence_utils_init_virq(struct hw_fence_driver_data *drv_data); /** - * hw_fence_utils_process_doorbell_mask() - Sends doorbell mask to process the signaled clients - * this API is only exported for simulation purposes. - * @drv_data: hw fence driver data. - * @db_flags: doorbell flag + * hw_fence_utils_init_soccp_irq() - Initialize interrupt handler for SOCCP to HLOS signaling + * @drv_data: hw fence driver data + * + * Returns zero if success, otherwise returns negative error code. */ -void hw_fence_utils_process_doorbell_mask(struct hw_fence_driver_data *drv_data, u64 db_flags); +int hw_fence_utils_init_soccp_irq(struct hw_fence_driver_data *drv_data); + +/** + * hw_fence_utils_process_signaled_clients_mask() - Process the mask containing HW Fence client IDs + * that HW Fence Driver is responsible for, i.e. + * ctrl queue and validation clients. + * @drv_data: hw fence driver data. 
+ * @mask: mask with signaled clients + */ +void hw_fence_utils_process_signaled_clients_mask(struct hw_fence_driver_data *drv_data, + u64 mask); /** * hw_fence_utils_alloc_mem() - Allocates the carved-out memory pool that will be used for the HW diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index 2dcbe4f2dc..720bc04de5 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -124,7 +124,7 @@ static ssize_t hw_fence_dbg_ipcc_write(struct file *file, const char __user *use struct hw_fence_driver_data *drv_data = file->private_data; return _debugfs_ipcc_trigger(file, user_buf, count, ppos, drv_data->ipcc_client_pid, - drv_data->ipcc_client_vid); + drv_data->ipcc_fctl_vid); } /** diff --git a/hw_fence/src/hw_fence_drv_ipc.c b/hw_fence/src/hw_fence_drv_ipc.c index b93956c5fd..ad714c3017 100644 --- a/hw_fence/src/hw_fence_drv_ipc.c +++ b/hw_fence/src/hw_fence_drv_ipc.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -16,6 +16,12 @@ #define HW_FENCE_IPC_MAP_MAX (HW_FENCE_MAX_STATIC_CLIENTS_INDEX + \ HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE) +/** + * HW_FENCE_IPCC_MAX_LOOPS: + * Max number of times HW Fence Driver can read interrupt information + */ +#define HW_FENCE_IPCC_MAX_LOOPS 100 + /** * struct hw_fence_client_ipc_map - map client id with ipc signal for trigger. * @ipc_client_id_virt: virtual ipc client id for the hw-fence client. 
@@ -119,6 +125,55 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_v2[HW_FENCE_IPC_MAP_MAX] {HW_FENCE_IPC_CLIENT_ID_IFE7_VID, HW_FENCE_IPC_CLIENT_ID_IFE7_PID, 0, false, true},/* ife7*/ }; +/** + * struct hw_fence_clients_ipc_map_sun - Table makes the 'client to signal' mapping, which is + * used by the hw fence driver to trigger ipc signal when hw fence is already + * signaled. + * This version is for sun target. + * + * Note that the index of this struct must match the enum hw_fence_client_id for clients ids less + * than HW_FENCE_MAX_STATIC_CLIENTS_INDEX. + * For clients with configurable sub-clients, the index of this struct matches + * HW_FENCE_MAX_STATIC_CLIENTS_INDEX + (client type index - HW_FENCE_MAX_CLIENT_TYPE_STATIC). + */ +struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_sun[HW_FENCE_IPC_MAP_MAX] = { + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 0, true, true},/*ctrlq */ + {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_PID, 0, false, false},/* ctx0*/ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 0, false, true}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 1, false, true}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 2, false, true}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 3, false, true}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 4, false, true}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 5, false, true}, +#if IS_ENABLED(CONFIG_DEBUG_FS) + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 21, true, false},/*val0*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 22, true, false},/*val1*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 23, true, false},/*val2*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 24, true, false},/*val3*/ + 
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 25, true, false},/*val4*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 26, true, false},/*val5*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 27, true, false},/*val6*/ +#else + {0, 0, 0, false, false}, /* val0 */ + {0, 0, 0, false, false}, /* val1 */ + {0, 0, 0, false, false}, /* val2 */ + {0, 0, 0, false, false}, /* val3 */ + {0, 0, 0, false, false}, /* val4 */ + {0, 0, 0, false, false}, /* val5 */ + {0, 0, 0, false, false}, /* val6 */ +#endif /* CONFIG_DEBUG_FS */ + {HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_PID_SUN, 0, true, true}, /*ipe*/ + {HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_PID, 0, true, true}, /* vpu */ + {HW_FENCE_IPC_CLIENT_ID_IFE0_VID, HW_FENCE_IPC_CLIENT_ID_IFE0_PID, 0, false, true},/* ife0*/ + {HW_FENCE_IPC_CLIENT_ID_IFE1_VID, HW_FENCE_IPC_CLIENT_ID_IFE1_PID, 0, false, true},/* ife1*/ + {HW_FENCE_IPC_CLIENT_ID_IFE2_VID, HW_FENCE_IPC_CLIENT_ID_IFE2_PID, 0, false, true},/* ife2*/ + {HW_FENCE_IPC_CLIENT_ID_IFE3_VID, HW_FENCE_IPC_CLIENT_ID_IFE3_PID, 0, false, true},/* ife3*/ + {HW_FENCE_IPC_CLIENT_ID_IFE4_VID, HW_FENCE_IPC_CLIENT_ID_IFE4_PID, 0, false, true},/* ife4*/ + {HW_FENCE_IPC_CLIENT_ID_IFE5_VID, HW_FENCE_IPC_CLIENT_ID_IFE5_PID, 0, false, true},/* ife5*/ + {HW_FENCE_IPC_CLIENT_ID_IFE6_VID, HW_FENCE_IPC_CLIENT_ID_IFE6_PID, 0, false, true},/* ife6*/ + {HW_FENCE_IPC_CLIENT_ID_IFE7_VID, HW_FENCE_IPC_CLIENT_ID_IFE7_PID, 0, false, true},/* ife7*/ +}; + int hw_fence_ipcc_get_client_virt_id(struct hw_fence_driver_data *drv_data, u32 client_id) { if (!drv_data || client_id >= drv_data->clients_num) @@ -309,6 +364,8 @@ static int _hw_fence_ipcc_hwrev_init(struct hw_fence_driver_data *drv_data, u32 case HW_FENCE_IPCC_HW_REV_170: drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; + drv_data->ipcc_fctl_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; 
+ drv_data->ipcc_fctl_pid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_KALAMA; drv_data->ipc_clients_table = hw_fence_clients_ipc_map; HWFNC_DBG_INIT("ipcc protocol_id: Kalama\n"); @@ -316,22 +373,58 @@ static int _hw_fence_ipcc_hwrev_init(struct hw_fence_driver_data *drv_data, u32 case HW_FENCE_IPCC_HW_REV_203: drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_PID; + drv_data->ipcc_fctl_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; + drv_data->ipcc_fctl_pid = HW_FENCE_IPC_CLIENT_ID_APPS_PID; drv_data->protocol_id = HW_FENCE_IPC_FENCE_PROTOCOL_ID_PINEAPPLE; /* Fence */ ret = _hw_fence_ipcc_init_map_with_configurable_clients(drv_data, hw_fence_clients_ipc_map_v2); HWFNC_DBG_INIT("ipcc protocol_id: Pineapple\n"); break; + case HW_FENCE_IPCC_HW_REV_2A2: + drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; + drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_PID; + drv_data->ipcc_fctl_vid = drv_data->has_soccp ? HW_FENCE_IPC_CLIENT_ID_SOCCP_VID : + HW_FENCE_IPC_CLIENT_ID_APPS_VID; + drv_data->ipcc_fctl_pid = drv_data->has_soccp ? HW_FENCE_IPC_CLIENT_ID_SOCCP_PID : + HW_FENCE_IPC_CLIENT_ID_APPS_PID; + drv_data->protocol_id = HW_FENCE_IPC_FENCE_PROTOCOL_ID_SUN; /* Fence */ + ret = _hw_fence_ipcc_init_map_with_configurable_clients(drv_data, + hw_fence_clients_ipc_map_sun); + HWFNC_DBG_INIT("ipcc protocol_id: Sun\n"); + break; default: + HWFNC_ERR("unrecognized ipcc hw-rev:0x%x\n", hwrev); return -1; } return ret; } -int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data) +static int _enable_client_signal_pair(struct hw_fence_driver_data *drv_data, + u32 rx_client_id_phys, u32 tx_client_id_vid, u32 signal_id) { void __iomem *ptr; u32 val; + + if (!drv_data || !drv_data->ipcc_io_mem || !drv_data->protocol_id) { + HWFNC_ERR("invalid drv_data:0x%pK ipcc_io_mem:0x%pK protocol:%d\n", + drv_data, drv_data ? 
drv_data->ipcc_io_mem : NULL, + drv_data ? drv_data->protocol_id : -1); + return -EINVAL; + } + + val = ((tx_client_id_vid) << 16) | ((signal_id) & 0xFFFF); + ptr = IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_ENABLE(drv_data->ipcc_io_mem, drv_data->protocol_id, + rx_client_id_phys); + HWFNC_DBG_H("Write:0x%x to RegOffset:0x%pK\n", val, ptr); + writel_relaxed(val, ptr); + + return 0; +} + +int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data) +{ + u32 val; int ret; HWFNC_DBG_H("enable ipc +\n"); @@ -347,82 +440,145 @@ int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data) return -EINVAL; } - /* Enable compute l1 (protocol_id = 2) */ - val = 0x00000000; - ptr = IPC_PROTOCOLp_CLIENTc_CONFIG(drv_data->ipcc_io_mem, drv_data->protocol_id, - drv_data->ipcc_client_pid); - HWFNC_DBG_H("Write:0x%x to RegOffset:0x%pK\n", val, ptr); - writel_relaxed(val, ptr); + /* Enable protocol for ctrl queue */ + hw_fence_ipcc_enable_protocol(drv_data, 0); - /* Enable Client-Signal pairs from APPS(NS) (0x8) to APPS(NS) (0x8) */ - val = 0x000080000; - ptr = IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_ENABLE(drv_data->ipcc_io_mem, drv_data->protocol_id, - drv_data->ipcc_client_pid); - HWFNC_DBG_H("Write:0x%x to RegOffset:0x%pK\n", val, ptr); - writel_relaxed(val, ptr); + /* Enable Client-Signal pairs from FCTL (SOCCP or APSS(NS)) to APPS(NS) (0x8) */ + ret = _enable_client_signal_pair(drv_data, drv_data->ipcc_client_pid, + drv_data->ipcc_fctl_vid, 0); HWFNC_DBG_H("enable ipc -\n"); return 0; } -int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data) +int hw_fence_ipcc_enable_protocol(struct hw_fence_driver_data *drv_data, u32 client_id) { - struct hw_fence_client_ipc_map *hw_fence_client; - bool protocol_enabled = false; void __iomem *ptr; u32 val; - int i; - HWFNC_DBG_H("enable dpu ipc +\n"); - - if (!drv_data || !drv_data->protocol_id || !drv_data->ipc_clients_table) { - HWFNC_ERR("invalid drv data\n"); - return -1; + if (!drv_data || 
!drv_data->protocol_id || !drv_data->ipc_clients_table || + client_id >= drv_data->clients_num) { + HWFNC_ERR("drv_data:0x%pK protocol:%d ipc_table:0x%pK client_id:%u max:%u\n", + drv_data, drv_data ? drv_data->protocol_id : -1, + drv_data ? drv_data->ipc_clients_table : NULL, client_id, + drv_data ? drv_data->clients_num : -1); + return -EINVAL; } - HWFNC_DBG_H("ipcc_io_mem:0x%llx\n", (u64)drv_data->ipcc_io_mem); - - HWFNC_DBG_H("Initialize dpu signals\n"); - /* Enable Client-Signal pairs from DPU (25) to APPS(NS) (8) */ - for (i = 0; i < drv_data->clients_num; i++) { - hw_fence_client = &drv_data->ipc_clients_table[i]; - - /* skip any client that is not a dpu client */ - if (hw_fence_client->ipc_client_id_virt != HW_FENCE_IPC_CLIENT_ID_DPU_VID) - continue; - - if (!protocol_enabled) { - /* - * First DPU client will enable the protocol for dpu, e.g. compute l1 - * (protocol_id = 2) or fencing protocol, depending on the target, for the - * dpu client (vid = 25, pid = 9). - * Sets bit(1) to clear when RECV_ID is read - */ - val = 0x00000001; - ptr = IPC_PROTOCOLp_CLIENTc_CONFIG(drv_data->ipcc_io_mem, - drv_data->protocol_id, hw_fence_client->ipc_client_id_phys); - HWFNC_DBG_H("Write:0x%x to RegOffset:0x%llx\n", val, (u64)ptr); - writel_relaxed(val, ptr); - - protocol_enabled = true; - } - - /* Enable signals for dpu client */ - HWFNC_DBG_H("dpu client:%d vid:%d pid:%d signal:%d\n", i, - hw_fence_client->ipc_client_id_virt, hw_fence_client->ipc_client_id_phys, - hw_fence_client->ipc_signal_id); - - /* Enable input apps-signal for dpu */ - val = (HW_FENCE_IPC_CLIENT_ID_APPS_VID << 16) | - (hw_fence_client->ipc_signal_id & 0xFFFF); - ptr = IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_ENABLE(drv_data->ipcc_io_mem, - drv_data->protocol_id, hw_fence_client->ipc_client_id_phys); - HWFNC_DBG_H("Write:0x%x to RegOffset:0x%llx\n", val, (u64)ptr); - writel_relaxed(val, ptr); - } - - HWFNC_DBG_H("enable dpu ipc -\n"); + /* Sets bit(1) to clear when RECV_ID is read */ + val = 0x00000001; + 
ptr = IPC_PROTOCOLp_CLIENTc_CONFIG(drv_data->ipcc_io_mem, drv_data->protocol_id, + drv_data->ipc_clients_table[client_id].ipc_client_id_phys); + HWFNC_DBG_H("Write:0x%x to RegOffset:0x%llx\n", val, (u64)ptr); + writel_relaxed(val, ptr); return 0; } + +int hw_fence_ipcc_enable_client_signal_pairs(struct hw_fence_driver_data *drv_data, + u32 start_client) +{ + struct hw_fence_client_ipc_map *hw_fence_client; + int i, ipc_client_vid; + + HWFNC_DBG_H("enable ipc for client signal pairs +\n"); + + if (!drv_data || !drv_data->protocol_id || !drv_data->ipc_clients_table || + start_client >= drv_data->clients_num) { + HWFNC_ERR("drv_data:0x%pK protocol:%d ipc_table:0x%pK start_client:%u max:%u\n", + drv_data, drv_data ? drv_data->protocol_id : -1, + drv_data ? drv_data->ipc_clients_table : NULL, start_client, + drv_data ? drv_data->clients_num : -1); + return -EINVAL; + } + ipc_client_vid = drv_data->ipc_clients_table[start_client].ipc_client_id_virt; + + HWFNC_DBG_H("ipcc_io_mem:0x%llx\n", (u64)drv_data->ipcc_io_mem); + + HWFNC_DBG_H("Initialize %s ipc signals\n", _get_ipc_virt_client_name(ipc_client_vid)); + /* Enable Client-Signal pairs from Client to APPS(NS) (8) */ + for (i = start_client; i < drv_data->clients_num; i++) { + hw_fence_client = &drv_data->ipc_clients_table[i]; + + /* + * Stop after enabling signals for all clients with the same ipcc client id as the + * given client. 
+ */ + if (hw_fence_client->ipc_client_id_virt != ipc_client_vid) + break; + + /* Enable signals for given client */ + HWFNC_DBG_H("%s client:%d vid:%d pid:%d signal:%d has_soccp:%d\n", + _get_ipc_virt_client_name(ipc_client_vid), i, + hw_fence_client->ipc_client_id_virt, hw_fence_client->ipc_client_id_phys, + hw_fence_client->ipc_signal_id, drv_data->has_soccp); + + /* Enable input signal from driver to client */ + if (ipc_client_vid != drv_data->ipcc_client_vid) + _enable_client_signal_pair(drv_data, hw_fence_client->ipc_client_id_phys, + drv_data->ipcc_client_vid, hw_fence_client->ipc_signal_id); + + /* If fctl separate from driver, enable separate input fctl-signal for client */ + if (drv_data->ipcc_client_vid != drv_data->ipcc_fctl_vid) + _enable_client_signal_pair(drv_data, hw_fence_client->ipc_client_id_phys, + drv_data->ipcc_fctl_vid, hw_fence_client->ipc_signal_id); + } + + HWFNC_DBG_H("enable %s ipc for start:%d end:%d -\n", + _get_ipc_virt_client_name(ipc_client_vid), start_client, i); + + return 0; +} + +u64 hw_fence_ipcc_get_signaled_clients_mask(struct hw_fence_driver_data *drv_data) +{ + u32 client_id, signal_id, reg_val; + u64 mask = 0; + int i; + + if (!drv_data || !drv_data->protocol_id || !drv_data->ipcc_client_pid || + !drv_data->ipcc_fctl_vid || !drv_data->has_soccp) { + HWFNC_ERR("invalid drv_data:0x%pK protocol:%d drv_pid:%d fctl_vid:%d\n", + drv_data, drv_data ? drv_data->protocol_id : -1, + drv_data ? drv_data->ipcc_client_pid : -1, + drv_data ? 
drv_data->ipcc_fctl_vid : -1); + return -1; + } + + /* read recv_id until done processing all clients signals */ + for (i = 0; i < HW_FENCE_IPCC_MAX_LOOPS; i++) { + mb(); /* make sure memory is updated */ + reg_val = readl_relaxed(IPC_PROTOCOLp_CLIENTc_RECV_ID(drv_data->ipcc_io_mem, + drv_data->protocol_id, drv_data->ipcc_client_pid)); + + /* finished reading clients */ + if (reg_val == HW_FENCE_IPC_RECV_ID_NONE) + return mask; + + client_id = (reg_val >> 16) & 0xFFFF; + signal_id = reg_val & 0xFFFF; + HWFNC_DBG_IRQ("read recv_id value:0x%x client:%u signal:%u\n", reg_val, client_id, + signal_id); + + if (client_id != drv_data->ipcc_fctl_vid) { + HWFNC_ERR("Received client:%u signal:%u expected client:%u\n", + client_id, signal_id, drv_data->ipcc_fctl_vid); + continue; + } + +#if IS_ENABLED(CONFIG_DEBUG_FS) + /* received signals from SOCCP for validation clients */ + if (signal_id >= hw_fence_ipcc_get_signal_id(drv_data, HW_FENCE_CLIENT_ID_VAL0) + && signal_id <= hw_fence_ipcc_get_signal_id(drv_data, + HW_FENCE_CLIENT_ID_VAL6)) + signal_id = signal_id - hw_fence_ipcc_get_signal_id(drv_data, + HW_FENCE_CLIENT_ID_VAL0) + HW_FENCE_CLIENT_ID_VAL0; +#endif /* CONFIG_DEBUG_FS*/ + mask |= BIT(signal_id); + } + + HWFNC_ERR("irq_handler has too many loops i=%d max:%d\n", i, HW_FENCE_IPCC_MAX_LOOPS); + + return mask; +} diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 962d4afe3a..13006a603b 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -722,10 +722,14 @@ int hw_fence_init(struct hw_fence_driver_data *drv_data) goto exit; } - /* Init vIRQ from VM */ - ret = hw_fence_utils_init_virq(drv_data); + /* Init irq from fctl */ + if (drv_data->has_soccp) + ret = hw_fence_utils_init_soccp_irq(drv_data); + else + ret = hw_fence_utils_init_virq(drv_data); if (ret) { - HWFNC_ERR("failed to init virq\n"); + HWFNC_ERR("failed to init irq has_soccp:%s\n", drv_data->has_soccp ? 
"true" : + "false"); goto exit; } @@ -778,7 +782,7 @@ exit: int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client) { - int ret = 0; + int client_id, ret = 0; /* * Initialize IPCC Signals for this client @@ -800,7 +804,20 @@ int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, case HW_FENCE_CLIENT_ID_VAL4: case HW_FENCE_CLIENT_ID_VAL5: case HW_FENCE_CLIENT_ID_VAL6: - /* nothing to initialize for validation clients */ + /* initialize ipcc signals for val clients */ + HWFNC_DBG_H("init_controller_signal: val client_id_ext:%d init:%d\n", + hw_fence_client->client_id_ext, drv_data->ipcc_val_initialized); + + if (!drv_data->ipcc_val_initialized) { + drv_data->ipcc_val_initialized = true; + client_id = hw_fence_utils_get_client_id_priv(drv_data, + HW_FENCE_CLIENT_ID_VAL0); + + if (drv_data->has_soccp) { + /* init input-soccp signals for val clients */ + hw_fence_ipcc_enable_client_signal_pairs(drv_data, client_id); + } + } break; #endif /* CONFIG_DEBUG_FS */ case HW_FENCE_CLIENT_ID_CTL0: @@ -814,9 +831,12 @@ int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, hw_fence_client->client_id_ext, drv_data->ipcc_dpu_initialized); if (!drv_data->ipcc_dpu_initialized) { drv_data->ipcc_dpu_initialized = true; + client_id = hw_fence_utils_get_client_id_priv(drv_data, + HW_FENCE_CLIENT_ID_CTL0); /* Init dpu client ipcc signal */ - hw_fence_ipcc_enable_dpu_signaling(drv_data); + hw_fence_ipcc_enable_protocol(drv_data, client_id); + hw_fence_ipcc_enable_client_signal_pairs(drv_data, client_id); } break; case HW_FENCE_CLIENT_ID_IPE ... 
HW_FENCE_CLIENT_ID_IPE + diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 63871f227b..58d6b3f7d6 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -58,27 +58,28 @@ #define HW_FENCE_CLIENT_TYPE_MAX_IFE 32 /** - * HW_FENCE_CTRL_QUEUE_DOORBELL: - * Bit set in doorbell flags mask if hw fence driver should read ctrl rx queue + * HW_FENCE_CLIENT_ID_CTRL_QUEUE: + * Bit set in signaled clients mask if hw fence driver should read ctrl rx queue */ -#define HW_FENCE_CTRL_QUEUE_DOORBELL 0 +#define HW_FENCE_CLIENT_ID_CTRL_QUEUE 0 /** - * HW_FENCE_DOORBELL_FLAGS_ID_LAST: - * Last doorbell flags id for which HW Fence Driver can receive doorbell + * HW_FENCE_SIGNALED_CLIENTS_LAST: + * Last signaled clients id for which HW Fence Driver can receive doorbell */ #if IS_ENABLED(CONFIG_DEBUG_FS) -#define HW_FENCE_DOORBELL_FLAGS_ID_LAST HW_FENCE_CLIENT_ID_VAL6 +#define HW_FENCE_SIGNALED_CLIENTS_LAST HW_FENCE_CLIENT_ID_VAL6 #else -#define HW_FENCE_DOORBELL_FLAGS_ID_LAST HW_FENCE_CTRL_QUEUE_DOORBELL +#define HW_FENCE_SIGNALED_CLIENTS_LAST HW_FENCE_CLIENT_ID_CTRL_QUEUE #endif /* CONFIG_DEBUG_FS */ /** - * HW_FENCE_DOORBELL_MASK: - * Each bit in this mask represents possible doorbell flag ids for which hw fence driver can receive + * HW_FENCE_ALL_SIGNALED_CLIENTS_MASK: + * Each bit in this mask represents possible signaled client ids for which hw fence driver can + * receive */ -#define HW_FENCE_DOORBELL_MASK \ - GENMASK(HW_FENCE_DOORBELL_FLAGS_ID_LAST, HW_FENCE_CTRL_QUEUE_DOORBELL) +#define HW_FENCE_ALL_SIGNALED_CLIENTS_MASK \ + GENMASK(HW_FENCE_SIGNALED_CLIENTS_LAST, HW_FENCE_CLIENT_ID_CTRL_QUEUE) /** * HW_FENCE_MAX_ITER_READ: @@ -195,7 +196,7 @@ static void _unlock_vm(struct hw_fence_driver_data *drv_data, uint64_t *lock) #endif hw_fence_ipcc_trigger_signal(drv_data, drv_data->ipcc_client_pid, - drv_data->ipcc_client_vid, 30); /* Trigger APPS Signal 30 */ + drv_data->ipcc_fctl_vid, 30); /* Trigger APPS Signal 
30 */ } } @@ -344,14 +345,14 @@ static int _process_fence_error_client_loopback(struct hw_fence_driver_data *drv return ret; } -static int _process_doorbell_id(struct hw_fence_driver_data *drv_data, int db_flag_id) +static int _process_signaled_client_id(struct hw_fence_driver_data *drv_data, int client_id) { int ret; - HWFNC_DBG_H("Processing doorbell mask id:%d\n", db_flag_id); - switch (db_flag_id) { - case HW_FENCE_CTRL_QUEUE_DOORBELL: - ret = _process_fence_error_client_loopback(drv_data, db_flag_id); + HWFNC_DBG_H("Processing signaled client mask id:%d\n", client_id); + switch (client_id) { + case HW_FENCE_CLIENT_ID_CTRL_QUEUE: + ret = _process_fence_error_client_loopback(drv_data, client_id); break; #if IS_ENABLED(CONFIG_DEBUG_FS) case HW_FENCE_CLIENT_ID_VAL0: @@ -361,35 +362,40 @@ static int _process_doorbell_id(struct hw_fence_driver_data *drv_data, int db_fl case HW_FENCE_CLIENT_ID_VAL4: case HW_FENCE_CLIENT_ID_VAL5: case HW_FENCE_CLIENT_ID_VAL6: - ret = process_validation_client_loopback(drv_data, db_flag_id); + ret = process_validation_client_loopback(drv_data, client_id); break; #endif /* CONFIG_DEBUG_FS */ default: - HWFNC_ERR("unknown mask id:%d\n", db_flag_id); + HWFNC_ERR("unknown mask id:%d\n", client_id); ret = -EINVAL; } return ret; } -void hw_fence_utils_process_doorbell_mask(struct hw_fence_driver_data *drv_data, u64 db_flags) +void hw_fence_utils_process_signaled_clients_mask(struct hw_fence_driver_data *drv_data, + u64 signaled_clients_mask) { - int db_flag_id = HW_FENCE_CTRL_QUEUE_DOORBELL; + int signaled_client_id; u64 mask; - for (; db_flag_id <= HW_FENCE_DOORBELL_FLAGS_ID_LAST; db_flag_id++) { - mask = 1 << db_flag_id; - if (mask & db_flags) { - HWFNC_DBG_H("db_flag:%d signaled! 
flags:0x%llx\n", db_flag_id, db_flags); + for (signaled_client_id = HW_FENCE_CLIENT_ID_CTRL_QUEUE; + signaled_client_id <= HW_FENCE_SIGNALED_CLIENTS_LAST; + signaled_client_id++) { + mask = 1 << signaled_client_id; + if (mask & signaled_clients_mask) { + HWFNC_DBG_H("received signaled_client:%d mask:0x%llx\n", signaled_client_id, + signaled_clients_mask); - if (_process_doorbell_id(drv_data, db_flag_id)) - HWFNC_ERR("Failed to process db_flag_id:%d\n", db_flag_id); + if (_process_signaled_client_id(drv_data, signaled_client_id)) + HWFNC_ERR("Failed to process signaled_client:%d\n", + signaled_client_id); /* clear mask for this flag id if nothing else pending finish */ - db_flags = db_flags & ~(mask); - HWFNC_DBG_H("db_flag_id:%d cleared flags:0x%llx mask:0x%llx ~mask:0x%llx\n", - db_flag_id, db_flags, mask, ~(mask)); - if (!db_flags) + signaled_clients_mask = signaled_clients_mask & ~(mask); + HWFNC_DBG_H("signaled_client:%d cleared flags:0x%llx mask:0x%llx\n", + signaled_client_id, signaled_clients_mask, mask); + if (!signaled_clients_mask) break; } } @@ -399,7 +405,7 @@ void hw_fence_utils_process_doorbell_mask(struct hw_fence_driver_data *drv_data, static void _hw_fence_cb(int irq, void *data) { struct hw_fence_driver_data *drv_data = (struct hw_fence_driver_data *)data; - gh_dbl_flags_t clear_flags = HW_FENCE_DOORBELL_MASK; + gh_dbl_flags_t clear_flags = HW_FENCE_ALL_SIGNALED_CLIENTS_MASK; int ret; if (!drv_data) @@ -414,7 +420,7 @@ static void _hw_fence_cb(int irq, void *data) HWFNC_DBG_IRQ("db callback label:%d irq:%d flags:0x%llx qtime:%llu\n", drv_data->db_label, irq, clear_flags, hw_fence_get_qtime(drv_data)); - hw_fence_utils_process_doorbell_mask(drv_data, clear_flags); + hw_fence_utils_process_signaled_clients_mask(drv_data, clear_flags); } int hw_fence_utils_init_virq(struct hw_fence_driver_data *drv_data) @@ -447,6 +453,44 @@ int hw_fence_utils_init_virq(struct hw_fence_driver_data *drv_data) return 0; } +static irqreturn_t 
hw_fence_soccp_irq_handler(int irq, void *data) +{ + struct hw_fence_driver_data *drv_data = (struct hw_fence_driver_data *)data; + u64 mask; + + mask = hw_fence_ipcc_get_signaled_clients_mask(drv_data); + hw_fence_utils_process_signaled_clients_mask(drv_data, mask); + + return IRQ_HANDLED; +} + +int hw_fence_utils_init_soccp_irq(struct hw_fence_driver_data *drv_data) +{ + struct platform_device *pdev; + int irq, ret; + + if (!drv_data || !drv_data->dev || !drv_data->has_soccp) { + HWFNC_ERR("invalid drv_data:0x%pK dev:0x%pK has_soccp:%d\n", drv_data, + drv_data ? drv_data->dev : NULL, drv_data ? drv_data->has_soccp : -1); + return -EINVAL; + } + + pdev = to_platform_device(drv_data->dev); + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + HWFNC_ERR("failed to get the irq\n"); + return irq; + } + HWFNC_DBG_INIT("Registering irq:%d\n", irq); + + ret = devm_request_irq(drv_data->dev, irq, hw_fence_soccp_irq_handler, IRQF_TRIGGER_HIGH, + "hwfence-driver", drv_data); + if (ret < 0) + HWFNC_ERR("failed to register irq:%d ret:%d\n", irq, ret); + + return ret; +} + static int hw_fence_gunyah_share_mem(struct hw_fence_driver_data *drv_data, gh_vmid_t self, gh_vmid_t peer) { @@ -567,11 +611,11 @@ static int hw_fence_rm_cb(struct notifier_block *nb, unsigned long cmd, void *da if (hw_fence_gunyah_share_mem(drv_data, self_vmid, peer_vmid)) HWFNC_ERR("failed to share memory\n"); else - drv_data->vm_ready = true; + drv_data->fctl_ready = true; } else { if (drv_data->res.start == res.start && resource_size(&drv_data->res) == resource_size(&res)) { - drv_data->vm_ready = true; + drv_data->fctl_ready = true; HWFNC_DBG_INIT("mem_ready: add:0x%llx size:%llu ret:%d\n", res.start, resource_size(&res), ret); } else { diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index aa3563894a..0c4fb2e2f9 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022-2023 
Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -180,7 +180,7 @@ int msm_hw_fence_create(void *client_handle, return -EINVAL; } - if (!hw_fence_drv_data->vm_ready) { + if (!hw_fence_drv_data->fctl_ready) { HWFNC_DBG_H("VM not ready, cannot create fence\n"); return -EAGAIN; } @@ -342,7 +342,7 @@ int msm_hw_fence_wait_update_v2(void *client_handle, return -EINVAL; } - if (!hw_fence_drv_data->vm_ready) { + if (!hw_fence_drv_data->fctl_ready) { HWFNC_DBG_H("VM not ready, cannot destroy fence\n"); return -EAGAIN; } @@ -453,7 +453,7 @@ int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags) return -EINVAL; } - if (!hw_fence_drv_data->vm_ready) { + if (!hw_fence_drv_data->fctl_ready) { HWFNC_DBG_H("VM not ready, cannot reset client\n"); return -EAGAIN; } @@ -498,7 +498,7 @@ int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 erro struct msm_hw_fence_client *hw_fence_client; if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready || - !hw_fence_drv_data->vm_ready) { + !hw_fence_drv_data->fctl_ready) { HWFNC_ERR("hw fence driver or vm not ready\n"); return -EAGAIN; } else if (IS_ERR_OR_NULL(client_handle) || @@ -525,7 +525,7 @@ int msm_hw_fence_update_txq_error(void *client_handle, u64 handle, u32 error, u3 struct msm_hw_fence_client *hw_fence_client; if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready || - !hw_fence_drv_data->vm_ready) { + !hw_fence_drv_data->fctl_ready) { HWFNC_ERR("hw fence driver or vm not ready\n"); return -EAGAIN; } else if (IS_ERR_OR_NULL(client_handle) || @@ -556,7 +556,7 @@ int msm_hw_fence_trigger_signal(void *client_handle, struct msm_hw_fence_client *hw_fence_client; if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready - || !hw_fence_drv_data->vm_ready) { + || !hw_fence_drv_data->fctl_ready) { HWFNC_ERR("hw fence driver or vm not ready\n"); 
return -EAGAIN; } else if (IS_ERR_OR_NULL(client_handle)) { @@ -722,7 +722,7 @@ int msm_hw_fence_driver_doorbell_sim(u64 db_mask) HWFNC_DBG_IRQ("db callback sim-mode flags:0x%llx qtime:%llu\n", db_mask, hw_fence_get_qtime(hw_fence_drv_data)); - hw_fence_utils_process_doorbell_mask(hw_fence_drv_data, db_mask); + hw_fence_utils_process_signaled_clients_mask(hw_fence_drv_data, db_mask); return 0; } @@ -752,6 +752,10 @@ static int msm_hw_fence_probe_init(struct platform_device *pdev) /* set ready value so clients can register */ hw_fence_drv_data->resources_ready = true; } else { + /* check for presence of soccp */ + hw_fence_drv_data->has_soccp = + of_property_read_bool(hw_fence_drv_data->dev->of_node, "soccp_controller"); + /* Allocate hw fence driver mem pool and share it with HYP */ rc = hw_fence_utils_alloc_mem(hw_fence_drv_data); if (rc) { From b7345021fffdca9d6aae60b03f03a7d654b80edc Mon Sep 17 00:00:00 2001 From: Grace An Date: Fri, 3 Nov 2023 12:54:37 -0700 Subject: [PATCH 105/166] mm-drivers: hw_fence: add one to one memory map for soccp Add change to one-to-one memory map the hwfence carved-out memory region for SOCCP from HW Fence Driver on targets with soccp. This change also removes unnecessary vm ops for memory share on such targets. 
Change-Id: I7c4fbe3010150531058660b148495c63e7baba24 Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_utils.c | 110 +++++++++++++++++++++++------- 1 file changed, 86 insertions(+), 24 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 58d6b3f7d6..814220105a 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -634,6 +635,78 @@ end: return NOTIFY_DONE; } +static int _register_vm_mem_with_hyp(struct hw_fence_driver_data *drv_data, + struct device_node *node_compat) +{ + int ret, notifier_ret; + + if (!drv_data || !node_compat) { + HWFNC_ERR("invalid params drv_data:0x%pK node_compat:0x%pK\n", drv_data, + node_compat); + return -EINVAL; + } + + ret = of_property_read_u32(node_compat, "gunyah-label", &drv_data->label); + if (ret) { + HWFNC_ERR("failed to find label info %d\n", ret); + return ret; + } + + /* Register memory with HYP for vm */ + ret = of_property_read_u32(node_compat, "peer-name", &drv_data->peer_name); + if (ret) + drv_data->peer_name = GH_SELF_VM; + + drv_data->rm_nb.notifier_call = hw_fence_rm_cb; + drv_data->rm_nb.priority = INT_MAX; + notifier_ret = gh_rm_register_notifier(&drv_data->rm_nb); + HWFNC_DBG_INIT("notifier: ret:%d peer_name:%d notifier_ret:%d\n", ret, + drv_data->peer_name, notifier_ret); + if (notifier_ret) { + HWFNC_ERR_ONCE("fail to register notifier ret:%d\n", ret); + return -EPROBE_DEFER; + } + + return 0; +} + +static int _init_soccp_mem(struct hw_fence_driver_data *drv_data) +{ + struct iommu_domain *domain; + int ret; + + if (!drv_data) { + HWFNC_ERR("invalid params drv_data:0x%pK\n", drv_data); + return -EINVAL; + } + + domain = iommu_get_domain_for_dev(drv_data->dev); + if (IS_ERR_OR_NULL(domain)) { + HWFNC_ERR("failed to get iommu domain for device ret:%ld\n", PTR_ERR(domain)); + return PTR_ERR(domain); + } + +#if (KERNEL_VERSION(6, 3, 0) <= 
LINUX_VERSION_CODE) + ret = iommu_map(domain, drv_data->res.start, drv_data->res.start, drv_data->size, + IOMMU_READ | IOMMU_WRITE, GFP_KERNEL); +#else + ret = iommu_map(domain, drv_data->res.start, drv_data->res.start, drv_data->size, + IOMMU_READ | IOMMU_WRITE); +#endif + if (ret) + HWFNC_ERR("failed to one-to-one map for soccp smmu addr:0x%llx sz:%lx ret:%d\n", + drv_data->res.start, drv_data->size, ret); + else + /* + * HW Fence Driver resources may not be ready at this point (this is separately + * tracked via resources_ready), but we assume soccp is ready once memory mapping + * is done. + */ + drv_data->fctl_ready = true; + + return ret; +} + /* Allocates carved-out mapped memory */ int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *drv_data) { @@ -642,7 +715,7 @@ int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *drv_data) const char *compat = "qcom,msm-hw-fence-mem"; struct device *dev = drv_data->dev; struct device_node *np; - int notifier_ret, ret; + int ret; node_compat = of_find_compatible_node(node, NULL, compat); if (!node_compat) { @@ -650,12 +723,6 @@ int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *drv_data) return -EINVAL; } - ret = of_property_read_u32(node_compat, "gunyah-label", &drv_data->label); - if (ret) { - HWFNC_ERR("failed to find label info %d\n", ret); - return ret; - } - np = of_parse_phandle(node_compat, "shared-buffer", 0); if (!np) { HWFNC_ERR("failed to read shared-buffer info\n"); @@ -682,28 +749,23 @@ int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *drv_data) return -ENOMEM; } - HWFNC_DBG_INIT("io_mem_base:0x%pK start:0x%llx end:0x%llx size:0x%lx name:%s\n", - drv_data->io_mem_base, drv_data->res.start, - drv_data->res.end, drv_data->size, drv_data->res.name); + HWFNC_DBG_INIT("io_mem_base:0x%pK start:0x%llx end:0x%llx sz:0x%lx name:%s has_soccp:%s\n", + drv_data->io_mem_base, drv_data->res.start, drv_data->res.end, drv_data->size, + drv_data->res.name, drv_data->has_soccp ? 
"true" : "false"); memset_io(drv_data->io_mem_base, 0x0, drv_data->size); - /* Register memory with HYP */ - ret = of_property_read_u32(node_compat, "peer-name", &drv_data->peer_name); + if (drv_data->has_soccp) + ret = _init_soccp_mem(drv_data); + else + ret = _register_vm_mem_with_hyp(drv_data, node_compat); + if (ret) - drv_data->peer_name = GH_SELF_VM; + HWFNC_ERR("failed to share memory with %s va:0x%pK pa:0x%llx sz:0x%lx name:%s\n", + drv_data->has_soccp ? "soccp" : "vm", drv_data->io_mem_base, + drv_data->res.start, drv_data->size, drv_data->res.name); - drv_data->rm_nb.notifier_call = hw_fence_rm_cb; - drv_data->rm_nb.priority = INT_MAX; - notifier_ret = gh_rm_register_notifier(&drv_data->rm_nb); - HWFNC_DBG_INIT("notifier: ret:%d peer_name:%d notifier_ret:%d\n", ret, - drv_data->peer_name, notifier_ret); - if (notifier_ret) { - HWFNC_ERR_ONCE("fail to register notifier ret:%d\n", notifier_ret); - return -EPROBE_DEFER; - } - - return 0; + return ret; } char *_get_mem_reserve_type(enum hw_fence_mem_reserve type) From c6fdeb9c5a1b8722f23dfe83bc60277ed51b9ebe Mon Sep 17 00:00:00 2001 From: Grace An Date: Fri, 20 Oct 2023 10:12:40 -0700 Subject: [PATCH 106/166] mm-drivers: hw_fence: add support to signal fctl from hlos for clients Some clients (like GPU) only update the client tx queue when they need to signal the fence immediately. This change adds support for synx_signal to also trigger the signal to the fence controller on behalf of the client after updating the client tx queue. 
Change-Id: I0cecb42a56f966dd5cb781fb31430f1a22707163 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_ipc.h | 16 +- hw_fence/include/hw_fence_drv_priv.h | 6 +- hw_fence/src/hw_fence_drv_ipc.c | 199 ++++++++++--------- hw_fence/src/hw_fence_drv_priv.c | 2 +- hw_fence/src/msm_hw_fence.c | 5 +- hw_fence/src/msm_hw_fence_synx_translation.c | 19 +- 6 files changed, 144 insertions(+), 103 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_ipc.h b/hw_fence/include/hw_fence_drv_ipc.h index 610f71469a..776f9e013a 100644 --- a/hw_fence/include/hw_fence_drv_ipc.h +++ b/hw_fence/include/hw_fence_drv_ipc.h @@ -155,14 +155,24 @@ int hw_fence_ipcc_get_signal_id(struct hw_fence_driver_data *drv_data, u32 clien bool hw_fence_ipcc_needs_rxq_update(struct hw_fence_driver_data *drv_data, int client_id); /** - * hw_fence_ipcc_needs_ipc_irq() - Returns bool to indicate if client needs ipc interrupt for - * already signaled fences + * hw_fence_ipcc_signaled_needs_ipc_irq() - Returns bool to indicate if client needs ipc interrupt + * for already signaled fences * @drv_data: driver data. * @client_id: hw fence driver client id. * * Return: true if client needs ipc interrupt for signaled fences, false otherwise */ -bool hw_fence_ipcc_needs_ipc_irq(struct hw_fence_driver_data *drv_data, int client_id); +bool hw_fence_ipcc_signaled_needs_ipc_irq(struct hw_fence_driver_data *drv_data, int client_id); + +/** + * hw_fence_ipcc_txq_update_needs_ipc_irq() - Returns bool to indicate if client needs ipc interrupt + * when updating client tx queue in hlos + * @drv_data: driver data. + * @client_id: hw fence driver client id. 
+ * + * Return: true if client needs ipc interrupt when updating client tx queue, false otherwise + */ +bool hw_fence_ipcc_txq_update_needs_ipc_irq(struct hw_fence_driver_data *drv_data, int client_id); /** * hw_fence_ipcc_get_signaled_clients_mask() - Returns mask to indicate signals for which clients diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 741318fb69..d0269d56ee 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -196,7 +196,8 @@ enum payload_type { * @ipc_client_vid: virtual id of the ipc client for this hw fence driver client * @ipc_client_pid: physical id of the ipc client for this hw fence driver client * @update_rxq: bool to indicate if client uses rx-queue - * @send_ipc: bool to indicate if client requires ipc interrupt for already signaled fences + * @signaled_send_ipc: bool to indicate if client requires ipc interrupt for already signaled fences + * @txq_update_send_ipc: bool to indicate if client requires ipc interrupt for txq updates * @context_id: context id for fences created internally * @seqno: sequence no for fences created internally * @wait_queue: wait queue for the validation clients @@ -215,7 +216,8 @@ struct msm_hw_fence_client { int ipc_client_vid; int ipc_client_pid; bool update_rxq; - bool send_ipc; + bool signaled_send_ipc; + bool txq_update_send_ipc; u64 context_id; atomic_t seqno; #if IS_ENABLED(CONFIG_DEBUG_FS) diff --git a/hw_fence/src/hw_fence_drv_ipc.c b/hw_fence/src/hw_fence_drv_ipc.c index ad714c3017..291220b14d 100644 --- a/hw_fence/src/hw_fence_drv_ipc.c +++ b/hw_fence/src/hw_fence_drv_ipc.c @@ -35,7 +35,8 @@ struct hw_fence_client_ipc_map { int ipc_client_id_phys; int ipc_signal_id; bool update_rxq; - bool send_ipc; + bool signaled_send_ipc; + bool txq_update_send_ipc; }; /** @@ -47,33 +48,33 @@ struct hw_fence_client_ipc_map { * Note that the index of this struct must match the enum hw_fence_client_id */ struct 
hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_IPC_MAP_MAX] = { - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 1, true, true},/*ctrl q*/ - {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0, false, false},/*ctx0 */ - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 0, false, true},/* ctl0 */ - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 1, false, true},/* ctl1 */ - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 2, false, true},/* ctl2 */ - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 3, false, true},/* ctl3 */ - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 4, false, true},/* ctl4 */ - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 5, false, true},/* ctl5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 1, true, true, false}, + {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0, false, false, true}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 0, false, true, false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 1, false, true, false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 2, false, true, false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 3, false, true, false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 4, false, true, false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 5, false, true, false}, #if IS_ENABLED(CONFIG_DEBUG_FS) - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 21, true, false},/*val0*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 22, true, false},/*val1*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 23, true, false},/*val2*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 24, true, false},/*val3*/ - 
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25, true, false},/*val4*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26, true, false},/*val5*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27, true, false},/*val6*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 21, true, false, true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 22, true, false, true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 23, true, false, true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 24, true, false, true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25, true, false, true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26, true, false, true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27, true, false, true}, #else - {0, 0, 0, false, false}, /* val0 */ - {0, 0, 0, false, false}, /* val1 */ - {0, 0, 0, false, false}, /* val2 */ - {0, 0, 0, false, false}, /* val3 */ - {0, 0, 0, false, false}, /* val4 */ - {0, 0, 0, false, false}, /* val5 */ - {0, 0, 0, false, false}, /* val6 */ + {0, 0, 0, false, false, false}, /* val0 */ + {0, 0, 0, false, false, false}, /* val1 */ + {0, 0, 0, false, false, false}, /* val2 */ + {0, 0, 0, false, false, false}, /* val3 */ + {0, 0, 0, false, false, false}, /* val4 */ + {0, 0, 0, false, false, false}, /* val5 */ + {0, 0, 0, false, false, false}, /* val6 */ #endif /* CONFIG_DEBUG_FS */ - {HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_VID, 0, true, true}, /* ipe */ - {HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_VID, 0, true, true}, /* vpu */ + {HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_VID, 0, true, true, false}, + {HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_VID, 0, true, true, false}, }; /** @@ -88,41 +89,41 @@ struct hw_fence_client_ipc_map 
hw_fence_clients_ipc_map[HW_FENCE_IPC_MAP_MAX] = * HW_FENCE_MAX_STATIC_CLIENTS_INDEX + (client type index - HW_FENCE_MAX_CLIENT_TYPE_STATIC). */ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_v2[HW_FENCE_IPC_MAP_MAX] = { - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 1, true, true},/*ctrlq */ - {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_PID, 0, false, false},/* ctx0*/ - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 0, false, true},/* ctl0 */ - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 1, false, true},/* ctl1 */ - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 2, false, true},/* ctl2 */ - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 3, false, true},/* ctl3 */ - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 4, false, true},/* ctl4 */ - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 5, false, true},/* ctl5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 1, true, true, false}, + {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_PID, 0, false, false, true}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 0, false, true, false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 1, false, true, false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 2, false, true, false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 3, false, true, false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 4, false, true, false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 5, false, true, false}, #if IS_ENABLED(CONFIG_DEBUG_FS) - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 21, true, false},/*val0*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 22, true, false},/*val1*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, 
HW_FENCE_IPC_CLIENT_ID_APPS_PID, 23, true, false},/*val2*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 24, true, false},/*val3*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 25, true, false},/*val4*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 26, true, false},/*val5*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 27, true, false},/*val6*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 21, true, false, true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 22, true, false, true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 23, true, false, true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 24, true, false, true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 25, true, false, true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 26, true, false, true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 27, true, false, true}, #else - {0, 0, 0, false, false}, /* val0 */ - {0, 0, 0, false, false}, /* val1 */ - {0, 0, 0, false, false}, /* val2 */ - {0, 0, 0, false, false}, /* val3 */ - {0, 0, 0, false, false}, /* val4 */ - {0, 0, 0, false, false}, /* val5 */ - {0, 0, 0, false, false}, /* val6 */ + {0, 0, 0, false, false, false}, /* val0 */ + {0, 0, 0, false, false, false}, /* val1 */ + {0, 0, 0, false, false, false}, /* val2 */ + {0, 0, 0, false, false, false}, /* val3 */ + {0, 0, 0, false, false, false}, /* val4 */ + {0, 0, 0, false, false, false}, /* val5 */ + {0, 0, 0, false, false, false}, /* val6 */ #endif /* CONFIG_DEBUG_FS */ - {HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_PID, 0, true, true}, /* ipe */ - {HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_PID, 0, true, true}, /* vpu */ - {HW_FENCE_IPC_CLIENT_ID_IFE0_VID, HW_FENCE_IPC_CLIENT_ID_IFE0_PID, 0, false, true},/* ife0*/ - 
{HW_FENCE_IPC_CLIENT_ID_IFE1_VID, HW_FENCE_IPC_CLIENT_ID_IFE1_PID, 0, false, true},/* ife1*/ - {HW_FENCE_IPC_CLIENT_ID_IFE2_VID, HW_FENCE_IPC_CLIENT_ID_IFE2_PID, 0, false, true},/* ife2*/ - {HW_FENCE_IPC_CLIENT_ID_IFE3_VID, HW_FENCE_IPC_CLIENT_ID_IFE3_PID, 0, false, true},/* ife3*/ - {HW_FENCE_IPC_CLIENT_ID_IFE4_VID, HW_FENCE_IPC_CLIENT_ID_IFE4_PID, 0, false, true},/* ife4*/ - {HW_FENCE_IPC_CLIENT_ID_IFE5_VID, HW_FENCE_IPC_CLIENT_ID_IFE5_PID, 0, false, true},/* ife5*/ - {HW_FENCE_IPC_CLIENT_ID_IFE6_VID, HW_FENCE_IPC_CLIENT_ID_IFE6_PID, 0, false, true},/* ife6*/ - {HW_FENCE_IPC_CLIENT_ID_IFE7_VID, HW_FENCE_IPC_CLIENT_ID_IFE7_PID, 0, false, true},/* ife7*/ + {HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_PID, 0, true, true, false}, + {HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_PID, 0, true, true, false}, + {HW_FENCE_IPC_CLIENT_ID_IFE0_VID, HW_FENCE_IPC_CLIENT_ID_IFE0_PID, 0, false, true, false}, + {HW_FENCE_IPC_CLIENT_ID_IFE1_VID, HW_FENCE_IPC_CLIENT_ID_IFE1_PID, 0, false, true, false}, + {HW_FENCE_IPC_CLIENT_ID_IFE2_VID, HW_FENCE_IPC_CLIENT_ID_IFE2_PID, 0, false, true, false}, + {HW_FENCE_IPC_CLIENT_ID_IFE3_VID, HW_FENCE_IPC_CLIENT_ID_IFE3_PID, 0, false, true, false}, + {HW_FENCE_IPC_CLIENT_ID_IFE4_VID, HW_FENCE_IPC_CLIENT_ID_IFE4_PID, 0, false, true, false}, + {HW_FENCE_IPC_CLIENT_ID_IFE5_VID, HW_FENCE_IPC_CLIENT_ID_IFE5_PID, 0, false, true, false}, + {HW_FENCE_IPC_CLIENT_ID_IFE6_VID, HW_FENCE_IPC_CLIENT_ID_IFE6_PID, 0, false, true, false}, + {HW_FENCE_IPC_CLIENT_ID_IFE7_VID, HW_FENCE_IPC_CLIENT_ID_IFE7_PID, 0, false, true, false}, }; /** @@ -137,41 +138,47 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_v2[HW_FENCE_IPC_MAP_MAX] * HW_FENCE_MAX_STATIC_CLIENTS_INDEX + (client type index - HW_FENCE_MAX_CLIENT_TYPE_STATIC). 
*/ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_sun[HW_FENCE_IPC_MAP_MAX] = { - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 0, true, true},/*ctrlq */ - {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_PID, 0, false, false},/* ctx0*/ - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 0, false, true}, - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 1, false, true}, - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 2, false, true}, - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 3, false, true}, - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 4, false, true}, - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 5, false, true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 0, true, true, false}, + {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_PID, 0, false, false, true}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 0, false, true, + false}, /* ctl0 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 1, false, true, + false}, /* ctl1 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 2, false, true, + false}, /* ctl2 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 3, false, true, + false}, /* ctl3 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 4, false, true, + false}, /* ctl4 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 5, false, true, + false}, /* ctl5 */ #if IS_ENABLED(CONFIG_DEBUG_FS) - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 21, true, false},/*val0*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 22, true, false},/*val1*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 23, true, false},/*val2*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, 
HW_FENCE_IPC_CLIENT_ID_APPS_PID, 24, true, false},/*val3*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 25, true, false},/*val4*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 26, true, false},/*val5*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 27, true, false},/*val6*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 21, true, false, true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 22, true, false, true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 23, true, false, true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 24, true, false, true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 25, true, false, true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 26, true, false, true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 27, true, false, true}, #else - {0, 0, 0, false, false}, /* val0 */ - {0, 0, 0, false, false}, /* val1 */ - {0, 0, 0, false, false}, /* val2 */ - {0, 0, 0, false, false}, /* val3 */ - {0, 0, 0, false, false}, /* val4 */ - {0, 0, 0, false, false}, /* val5 */ - {0, 0, 0, false, false}, /* val6 */ + {0, 0, 0, false, false, false}, /* val0 */ + {0, 0, 0, false, false, false}, /* val1 */ + {0, 0, 0, false, false, false}, /* val2 */ + {0, 0, 0, false, false, false}, /* val3 */ + {0, 0, 0, false, false, false}, /* val4 */ + {0, 0, 0, false, false, false}, /* val5 */ + {0, 0, 0, false, false, false}, /* val6 */ #endif /* CONFIG_DEBUG_FS */ - {HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_PID_SUN, 0, true, true}, /*ipe*/ - {HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_PID, 0, true, true}, /* vpu */ - {HW_FENCE_IPC_CLIENT_ID_IFE0_VID, HW_FENCE_IPC_CLIENT_ID_IFE0_PID, 0, false, true},/* ife0*/ - {HW_FENCE_IPC_CLIENT_ID_IFE1_VID, HW_FENCE_IPC_CLIENT_ID_IFE1_PID, 0, false, true},/* ife1*/ - 
{HW_FENCE_IPC_CLIENT_ID_IFE2_VID, HW_FENCE_IPC_CLIENT_ID_IFE2_PID, 0, false, true},/* ife2*/ - {HW_FENCE_IPC_CLIENT_ID_IFE3_VID, HW_FENCE_IPC_CLIENT_ID_IFE3_PID, 0, false, true},/* ife3*/ - {HW_FENCE_IPC_CLIENT_ID_IFE4_VID, HW_FENCE_IPC_CLIENT_ID_IFE4_PID, 0, false, true},/* ife4*/ - {HW_FENCE_IPC_CLIENT_ID_IFE5_VID, HW_FENCE_IPC_CLIENT_ID_IFE5_PID, 0, false, true},/* ife5*/ - {HW_FENCE_IPC_CLIENT_ID_IFE6_VID, HW_FENCE_IPC_CLIENT_ID_IFE6_PID, 0, false, true},/* ife6*/ - {HW_FENCE_IPC_CLIENT_ID_IFE7_VID, HW_FENCE_IPC_CLIENT_ID_IFE7_PID, 0, false, true},/* ife7*/ + {HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_PID_SUN, 0, true, true, false}, + {HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_PID, 0, true, true, false}, + {HW_FENCE_IPC_CLIENT_ID_IFE0_VID, HW_FENCE_IPC_CLIENT_ID_IFE0_PID, 0, false, true, false}, + {HW_FENCE_IPC_CLIENT_ID_IFE1_VID, HW_FENCE_IPC_CLIENT_ID_IFE1_PID, 0, false, true, false}, + {HW_FENCE_IPC_CLIENT_ID_IFE2_VID, HW_FENCE_IPC_CLIENT_ID_IFE2_PID, 0, false, true, false}, + {HW_FENCE_IPC_CLIENT_ID_IFE3_VID, HW_FENCE_IPC_CLIENT_ID_IFE3_PID, 0, false, true, false}, + {HW_FENCE_IPC_CLIENT_ID_IFE4_VID, HW_FENCE_IPC_CLIENT_ID_IFE4_PID, 0, false, true, false}, + {HW_FENCE_IPC_CLIENT_ID_IFE5_VID, HW_FENCE_IPC_CLIENT_ID_IFE5_PID, 0, false, true, false}, + {HW_FENCE_IPC_CLIENT_ID_IFE6_VID, HW_FENCE_IPC_CLIENT_ID_IFE6_PID, 0, false, true, false}, + {HW_FENCE_IPC_CLIENT_ID_IFE7_VID, HW_FENCE_IPC_CLIENT_ID_IFE7_PID, 0, false, true, false}, }; int hw_fence_ipcc_get_client_virt_id(struct hw_fence_driver_data *drv_data, u32 client_id) @@ -206,12 +213,20 @@ bool hw_fence_ipcc_needs_rxq_update(struct hw_fence_driver_data *drv_data, int c return drv_data->ipc_clients_table[client_id].update_rxq; } -bool hw_fence_ipcc_needs_ipc_irq(struct hw_fence_driver_data *drv_data, int client_id) +bool hw_fence_ipcc_signaled_needs_ipc_irq(struct hw_fence_driver_data *drv_data, int client_id) { - if (!drv_data || client_id >= HW_FENCE_CLIENT_MAX) + if 
(!drv_data || client_id >= drv_data->clients_num) return false; - return drv_data->ipc_clients_table[client_id].send_ipc; + return drv_data->ipc_clients_table[client_id].signaled_send_ipc; +} + +bool hw_fence_ipcc_txq_update_needs_ipc_irq(struct hw_fence_driver_data *drv_data, int client_id) +{ + if (!drv_data || client_id >= drv_data->clients_num) + return false; + + return drv_data->ipc_clients_table[client_id].txq_update_send_ipc; } /** diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 13006a603b..89180db90d 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1688,7 +1688,7 @@ static void _fence_ctl_signal(struct hw_fence_driver_data *drv_data, HW_FENCE_RX_QUEUE - 1); /* Signal the hw fence now */ - if (hw_fence_client->send_ipc) + if (hw_fence_client->signaled_send_ipc) hw_fence_ipcc_trigger_signal(drv_data, tx_client_id, rx_client_id, hw_fence_client->ipc_signal_id); } diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 0c4fb2e2f9..9ae8f4df25 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -87,7 +87,10 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, } hw_fence_client->update_rxq = hw_fence_ipcc_needs_rxq_update(hw_fence_drv_data, client_id); - hw_fence_client->send_ipc = hw_fence_ipcc_needs_ipc_irq(hw_fence_drv_data, client_id); + hw_fence_client->signaled_send_ipc = hw_fence_ipcc_signaled_needs_ipc_irq(hw_fence_drv_data, + client_id); + hw_fence_client->txq_update_send_ipc = + hw_fence_ipcc_txq_update_needs_ipc_irq(hw_fence_drv_data, client_id); hw_fence_client->queues_num = hw_fence_utils_get_queues_num(hw_fence_drv_data, client_id); if (!hw_fence_client->queues_num || (hw_fence_client->update_rxq && diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c index cdec5989e2..0a2cb85923 100644 --- a/hw_fence/src/msm_hw_fence_synx_translation.c +++ 
b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -190,26 +190,37 @@ static int synx_hwfence_release(struct synx_session *session, u32 h_synx) static int synx_hwfence_signal(struct synx_session *session, u32 h_synx, enum synx_signal_status status) { + struct msm_hw_fence_client *hw_fence_client; u32 error; int ret; - if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type) || + if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type) || !session->client || !(h_synx & SYNX_HW_FENCE_HANDLE_FLAG) || !(status == SYNX_STATE_SIGNALED_SUCCESS || status == SYNX_STATE_SIGNALED_CANCEL || status > SYNX_STATE_SIGNALED_MAX)) { - HWFNC_ERR("invalid session:0x%pK synx_id:%d h_synx:%u status:%u\n", session, - IS_ERR_OR_NULL(session) ? -1 : session->type, h_synx, status); + HWFNC_ERR("invalid session:0x%pK synx_id:%d client:0x%pK h_synx:%u status:%u\n", + session, IS_ERR_OR_NULL(session) ? -1 : session->type, + IS_ERR_OR_NULL(session) ? NULL : session->client, h_synx, status); return -SYNX_INVALID; } error = hw_fence_interop_to_hw_fence_error(status); h_synx &= HW_FENCE_HANDLE_INDEX_MASK; ret = msm_hw_fence_update_txq(session->client, h_synx, 0, error); - if (ret) + if (ret) { HWFNC_ERR("synx_id:%d failed to signal fence h_synx:%u status:%d ret:%d\n", session->type, h_synx, status, ret); + goto error; + } + hw_fence_client = (struct msm_hw_fence_client *)session->client; + if (hw_fence_client->txq_update_send_ipc) + hw_fence_ipcc_trigger_signal(hw_fence_drv_data, + hw_fence_client->ipc_client_pid, hw_fence_drv_data->ipcc_fctl_vid, + hw_fence_client->ipc_signal_id); + +error: return hw_fence_interop_to_synx_status(ret); } From 47a7fde5426997e2174353171ab003564bb26d9b Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 24 Oct 2023 11:03:42 -0700 Subject: [PATCH 107/166] mm-drivers: hw_fence: add soccp power vote for validation clients This change adds and removes soccp power vote for registration and deregistration of hw-fence validation clients respectively. 
Change-Id: I66f6749465c988050f5038a07749acece902be0b Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 3 +++ hw_fence/src/hw_fence_drv_utils.c | 15 ++++++++--- hw_fence/src/msm_hw_fence.c | 37 ++++++++++++++++++++++++++++ 3 files changed, 52 insertions(+), 3 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index d0269d56ee..3d765e0800 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -14,6 +14,7 @@ #include #include #include +#include #include "msm_hw_fence.h" /* max u64 to indicate invalid fence */ @@ -380,6 +381,7 @@ struct hw_fence_signal_cb { * @ipcc_val_initialized: flag to indicate if val is initialized * @dma_fence_table_lock: lock to synchronize access to dma-fence table * @dma_fence_table: table with internal dma-fences for hw-fences + * @soccp_rproc: soccp rproc object used to set power vote * @has_soccp: flag to indicate if soccp is present (otherwise vm is used) */ struct hw_fence_driver_data { @@ -474,6 +476,7 @@ struct hw_fence_driver_data { DECLARE_HASHTABLE(dma_fence_table, DMA_FENCE_HASH_TABLE_BIT); /* soccp is present */ + struct rproc *soccp_rproc; bool has_soccp; }; diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 814220105a..5b3b31dbc6 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -1089,6 +1089,18 @@ int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data) int ret; size_t size; u32 val = 0; + phandle ph; + + /* check presence of soccp */ + ret = of_property_read_u32(drv_data->dev->of_node, "soccp_controller", &ph); + if (!ret) { + drv_data->has_soccp = true; + drv_data->soccp_rproc = rproc_get_by_phandle(ph); + if (IS_ERR_OR_NULL(drv_data->soccp_rproc)) { + HWFNC_ERR("failed to find rproc for phandle:%u\n", ph); + return -EINVAL; + } + } ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-table-entries", &val); if (ret 
|| !val) { @@ -1146,9 +1158,6 @@ int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data) if (!drv_data->clients) return -ENOMEM; - /* check presence of soccp */ - drv_data->has_soccp = of_property_read_bool(drv_data->dev->of_node, "soccp_controller"); - HWFNC_DBG_INIT("table: entries=%u mem_size=%u queue: entries=%u\b", drv_data->hw_fence_table_entries, drv_data->hw_fence_mem_fences_table_size, drv_data->hw_fence_queue_entries); diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 9ae8f4df25..8324b5fc83 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -8,6 +8,10 @@ #include #include #include +#include +#if (KERNEL_VERSION(6, 5, 0) <= LINUX_VERSION_CODE) +#include +#endif #include "hw_fence_drv_priv.h" #include "hw_fence_drv_utils.h" @@ -18,6 +22,26 @@ struct hw_fence_driver_data *hw_fence_drv_data; bool hw_fence_driver_enable; +static int _set_power_vote_if_needed(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, bool state) +{ + int ret = 0; + +#if IS_ENABLED(CONFIG_DEBUG_FS) + if (drv_data->has_soccp && + hw_fence_client->client_id_ext >= HW_FENCE_CLIENT_ID_VAL0 && + hw_fence_client->client_id_ext <= HW_FENCE_CLIENT_ID_VAL6) { +#if (KERNEL_VERSION(6, 5, 0) <= LINUX_VERSION_CODE) + ret = rproc_set_state(drv_data->soccp_rproc, state); +#else + ret = -EINVAL; +#endif + } +#endif /* CONFIG_DEBUG_FS */ + + return ret; +} + void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, struct msm_hw_fence_mem_addr *mem_descriptor) { @@ -133,6 +157,13 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, init_waitqueue_head(&hw_fence_client->wait_queue); #endif /* CONFIG_DEBUG_FS */ + ret = _set_power_vote_if_needed(hw_fence_drv_data, hw_fence_client, true); + if (ret) { + HWFNC_ERR("set soccp power vote failed, fail client:%u registration ret:%d\n", + hw_fence_client->client_id_ext, ret); + goto error; + } + return (void *)hw_fence_client; 
error: @@ -147,6 +178,7 @@ EXPORT_SYMBOL_GPL(msm_hw_fence_register); int msm_hw_fence_deregister(void *client_handle) { struct msm_hw_fence_client *hw_fence_client; + int ret; if (IS_ERR_OR_NULL(client_handle)) { HWFNC_ERR("Invalid client handle\n"); @@ -161,6 +193,11 @@ int msm_hw_fence_deregister(void *client_handle) HWFNC_DBG_H("+\n"); + ret = _set_power_vote_if_needed(hw_fence_drv_data, hw_fence_client, false); + if (ret) + HWFNC_ERR("remove soccp power vote failed, fail client:%u deregistration ret:%d\n", + hw_fence_client->client_id_ext, ret); + /* Free all the allocated resources */ hw_fence_cleanup_client(hw_fence_drv_data, hw_fence_client); From ce88a60ec882f392fe6cce52d99051e9012e9a5d Mon Sep 17 00:00:00 2001 From: Grace An Date: Thu, 9 Feb 2023 10:22:22 -0800 Subject: [PATCH 108/166] mm-drivers: hw_fence: enable caching of hwfence shared memory Currently, the carved-out memory region shared between HW Fence Driver and Fence CTL is memory mapped as uncached memory. Change memory mapping to support caching. 
Change-Id: Iefa6772086e874e6f4c80070b40834f62bb2d788 Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_utils.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 814220105a..db080c009b 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -688,10 +688,10 @@ static int _init_soccp_mem(struct hw_fence_driver_data *drv_data) #if (KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE) ret = iommu_map(domain, drv_data->res.start, drv_data->res.start, drv_data->size, - IOMMU_READ | IOMMU_WRITE, GFP_KERNEL); + IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, GFP_KERNEL); #else ret = iommu_map(domain, drv_data->res.start, drv_data->res.start, drv_data->size, - IOMMU_READ | IOMMU_WRITE); + IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE); #endif if (ret) HWFNC_ERR("failed to one-to-one map for soccp smmu addr:0x%llx sz:%lx ret:%d\n", @@ -713,7 +713,6 @@ int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *drv_data) struct device_node *node = drv_data->dev->of_node; struct device_node *node_compat; const char *compat = "qcom,msm-hw-fence-mem"; - struct device *dev = drv_data->dev; struct device_node *np; int ret; @@ -736,8 +735,13 @@ int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *drv_data) return -EINVAL; } - drv_data->io_mem_base = devm_ioremap_wc(dev, drv_data->res.start, - resource_size(&drv_data->res)); + if (drv_data->has_soccp) + drv_data->io_mem_base = memremap(drv_data->res.start, resource_size(&drv_data->res), + MEMREMAP_WB); + else + drv_data->io_mem_base = devm_ioremap_wc(drv_data->dev, drv_data->res.start, + resource_size(&drv_data->res)); + if (!drv_data->io_mem_base) { HWFNC_ERR("ioremap failed!\n"); return -ENXIO; From fb7af013034c125791847d58d8dc4061e15817aa Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 30 Jan 2024 15:22:47 -0800 Subject: [PATCH 109/166] mm-drivers: hw_fence: add hw-fence validation client ID Add 
hw-fence validation client IDs which will be defined in synx header. Change-Id: I6936364b3c9f43269a4547cf648478de2b9ba5ec Signed-off-by: Grace An --- hw_fence/src/msm_hw_fence_synx_translation.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c index 0a2cb85923..e38d3c36f1 100644 --- a/hw_fence/src/msm_hw_fence_synx_translation.c +++ b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -19,6 +19,10 @@ */ #define MAX_SUPPORTED_DPU0 (HW_FENCE_CLIENT_ID_CTL5 - HW_FENCE_CLIENT_ID_CTL0) +#ifndef SYNX_CLIENT_HW_FENCE_TEST_CTX0 +#define SYNX_CLIENT_HW_FENCE_TEST_CTX0 2368 +#endif + static enum hw_fence_client_id _get_hw_fence_client_id(enum synx_client_id synx_client_id) { enum hw_fence_client_id hw_fence_client_id; From fb0a18ee349f4842ee312ea818c6753d76305c42 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 25 Jul 2023 14:02:07 -0700 Subject: [PATCH 110/166] mm-drivers: hw_fence: add val client wait and signal Add support for hw-fence validation clients to signal and wait on hw-fences. 
Change-Id: I5a530b1bccd286d97af6608558c56d9a3d6239bf Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_debug.h | 2 + hw_fence/src/hw_fence_drv_debug.c | 58 ++++++++++++++++++++ hw_fence/src/hw_fence_ioctl.c | 53 +++--------------- hw_fence/src/msm_hw_fence_synx_translation.c | 38 ++++++++++++- 4 files changed, 104 insertions(+), 47 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_debug.h b/hw_fence/include/hw_fence_drv_debug.h index 2f38c6e3b7..6cface7cc6 100644 --- a/hw_fence/include/hw_fence_drv_debug.h +++ b/hw_fence/include/hw_fence_drv_debug.h @@ -74,6 +74,8 @@ int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data); #if IS_ENABLED(CONFIG_DEBUG_FS) int process_validation_client_loopback(struct hw_fence_driver_data *drv_data, int client_id); +int hw_fence_debug_wait_val(struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, + u64 hash, u64 timeout_ms, u32 *error); void hw_fence_debug_dump_queues(enum hw_fence_drv_prio prio, struct msm_hw_fence_client *hw_fence_client); diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index 720bc04de5..c8cd80f482 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -6,6 +6,7 @@ #include #include #include +#include #include "hw_fence_drv_priv.h" #include "hw_fence_drv_debug.h" @@ -28,6 +29,8 @@ #define HFENCE_EVT_MSG "[%d][cpu:%d][%llu] data[%d]:%s\n" +#define ktime_compare_safe(A, B) ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0)) + u32 msm_hw_fence_debug_level = HW_FENCE_PRINTK; /** @@ -1172,6 +1175,61 @@ int process_validation_client_loopback(struct hw_fence_driver_data *drv_data, return 0; } +int hw_fence_debug_wait_val(struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, + u64 hash, u64 timeout_ms, u32 *error) +{ + int ret, read = 1, queue_type = HW_FENCE_RX_QUEUE - 1; /* rx queue index */ + struct msm_hw_fence_queue_payload payload; + ktime_t cur_ktime, exp_ktime; + u64 context, seqno; 
+ + if (!hw_fence_client) { + HWFNC_ERR("invalid client\n"); + return -EINVAL; + } + + exp_ktime = ktime_add_ms(ktime_get(), timeout_ms); + do { + ret = wait_event_timeout(hw_fence_client->wait_queue, + atomic_read(&hw_fence_client->val_signal) > 0, + msecs_to_jiffies(timeout_ms)); + cur_ktime = ktime_get(); + } while ((atomic_read(&hw_fence_client->val_signal) <= 0) && (ret == 0) && + ktime_compare_safe(exp_ktime, cur_ktime) > 0); + + if (!ret) { + HWFNC_ERR("timed out waiting for the client signal %llu\n", timeout_ms); + return -ETIMEDOUT; + } + + /* clear doorbell signal flag */ + atomic_set(&hw_fence_client->val_signal, 0); + context = fence ? fence->context : 0; + seqno = fence ? fence->seqno : 0; + + while (read) { + read = hw_fence_read_queue(hw_fence_client, &payload, queue_type); + if (read < 0) { + HWFNC_ERR("unable to read client rxq client_id:%u\n", + hw_fence_client->client_id); + break; + } + HWFNC_DBG_L("rxq read: hash:%llu, flags:%llu, error:%u\n", + payload.hash, payload.flags, payload.error); + if ((fence && payload.ctxt_id == context && payload.seqno == seqno) || + hash == payload.hash) { + *error = payload.error; + return 0; + } + } + + HWFNC_ERR("fence received did not match the fence expected\n"); + HWFNC_ERR("received: hash:%llu ctx:%llu seq:%llu expected: hash:%llu ctx:%llu seq:%llu\n", + payload.hash, payload.ctxt_id, payload.seqno, hash, context, seqno); + + return read; +} + static const struct file_operations hw_fence_reset_client_fops = { .open = simple_open, .write = hw_fence_dbg_reset_client_wr, diff --git a/hw_fence/src/hw_fence_ioctl.c b/hw_fence/src/hw_fence_ioctl.c index 0fbb69b30e..c01ce37431 100644 --- a/hw_fence/src/hw_fence_ioctl.c +++ b/hw_fence/src/hw_fence_ioctl.c @@ -1,12 +1,11 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include #include #include -#include #include #include @@ -37,8 +36,6 @@ .name = #ioctl \ } -#define ktime_compare_safe(A, B) ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0)) - /** * struct hw_sync_obj - per client hw sync object. * @context: context id used to create fences. @@ -483,11 +480,10 @@ static long hw_sync_ioctl_fence_signal(struct hw_sync_obj *obj, unsigned long ar static long hw_sync_ioctl_fence_wait(struct hw_sync_obj *obj, unsigned long arg) { struct msm_hw_fence_client *hw_fence_client; - struct msm_hw_fence_queue_payload payload; struct hw_fence_sync_wait_data data; struct dma_fence *fence; - ktime_t cur_ktime, exp_ktime; - int fd, ret, read = 1, queue_type = HW_FENCE_RX_QUEUE - 1; /* rx queue index */ + int fd, ret; + u32 error; if (!_is_valid_client(obj)) return -EINVAL; @@ -510,48 +506,15 @@ static long hw_sync_ioctl_fence_wait(struct hw_sync_obj *obj, unsigned long arg) return -EINVAL; } - exp_ktime = ktime_add_ms(ktime_get(), data.timeout_ms); - do { - ret = wait_event_timeout(hw_fence_client->wait_queue, - atomic_read(&hw_fence_client->val_signal) > 0, - msecs_to_jiffies(data.timeout_ms)); - cur_ktime = ktime_get(); - } while ((atomic_read(&hw_fence_client->val_signal) <= 0) && (ret == 0) && - ktime_compare_safe(exp_ktime, cur_ktime) > 0); - - if (!ret) { - HWFNC_ERR("timed out waiting for the client signal %llu\n", data.timeout_ms); - /* Decrement the refcount that hw_sync_get_fence increments */ - dma_fence_put(fence); - return -ETIMEDOUT; - } - - /* clear doorbell signal flag */ - atomic_set(&hw_fence_client->val_signal, 0); - - while (read) { - read = hw_fence_read_queue(obj->client_handle, &payload, queue_type); - if (read < 0) { - HWFNC_ERR("unable to read client rxq client_id:%d\n", obj->client_id); - break; - } - HWFNC_DBG_L("rxq read: hash:%llu, flags:%llu, error:%d\n", - payload.hash, payload.flags, payload.error); - if (payload.ctxt_id == fence->context && payload.seqno == fence->seqno) { - /* Decrement the refcount that 
hw_sync_get_fence increments */ - dma_fence_put(fence); - return 0; - } - } + ret = hw_fence_debug_wait_val(hw_fence_client, fence, 0, data.timeout_ms, &error); + if (ret) + HWFNC_ERR("failed to wait for hw-fence client:%d ctx:%llu seq:%llu\n", + hw_fence_client->client_id, fence->context, fence->seqno); /* Decrement the refcount that hw_sync_get_fence increments */ dma_fence_put(fence); - HWFNC_ERR("fence received did not match the fence expected\n"); - HWFNC_ERR("fence received: ctx:%llu seqno:%llu fence expected: ctx:%llu seqno:%llu\n", - payload.ctxt_id, payload.seqno, fence->context, fence->seqno); - - return read; + return ret; } static long hw_sync_ioctl_reset_client(struct hw_sync_obj *obj, unsigned long arg) diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c index e38d3c36f1..87a0991e3c 100644 --- a/hw_fence/src/msm_hw_fence_synx_translation.c +++ b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -14,10 +14,11 @@ #include "hw_fence_drv_interop.h" /** - * MAX_SUPPORTED_DPU0: - * Maximum number of dpu clients supported + * MAX_SUPPORTED_DPU0: Maximum number of dpu clients supported + * MAX_SUPPORTED_TEST: Maximum number of validation clients supported */ #define MAX_SUPPORTED_DPU0 (HW_FENCE_CLIENT_ID_CTL5 - HW_FENCE_CLIENT_ID_CTL0) +#define MAX_SUPPORTED_TEST (HW_FENCE_CLIENT_ID_VAL6 - HW_FENCE_CLIENT_ID_VAL1) #ifndef SYNX_CLIENT_HW_FENCE_TEST_CTX0 #define SYNX_CLIENT_HW_FENCE_TEST_CTX0 2368 @@ -50,6 +51,10 @@ static enum hw_fence_client_id _get_hw_fence_client_id(enum synx_client_id synx_ hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_IFE0_CTX0 + HW_FENCE_CLIENT_ID_IFE0; break; + case SYNX_CLIENT_HW_FENCE_TEST_CTX0 ... 
SYNX_CLIENT_HW_FENCE_TEST_CTX0 + MAX_SUPPORTED_TEST: + hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_TEST_CTX0 + + HW_FENCE_CLIENT_ID_VAL0; + break; default: HWFNC_ERR("Unsupported hw-fence client for synx_id:%d\n", synx_client_id); hw_fence_client_id = HW_FENCE_CLIENT_MAX; @@ -228,6 +233,34 @@ error: return hw_fence_interop_to_synx_status(ret); } +static int synx_hwfence_wait(struct synx_session *session, u32 h_synx, u64 timeout_ms) +{ + int ret = -EINVAL; + u32 error; + + if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type) || + !(h_synx & SYNX_HW_FENCE_HANDLE_FLAG)) { + HWFNC_ERR("invalid session:0x%pK synx_id:%d\n", session, + IS_ERR_OR_NULL(session) ? -1 : session->type); + return -SYNX_INVALID; + } + +#if IS_ENABLED(CONFIG_DEBUG_FS) + h_synx &= HW_FENCE_HANDLE_INDEX_MASK; + if (session->type >= SYNX_CLIENT_HW_FENCE_TEST_CTX0 + && session->type <= SYNX_CLIENT_HW_FENCE_TEST_CTX0 + MAX_SUPPORTED_TEST) + ret = hw_fence_debug_wait_val(session->client, NULL, h_synx, timeout_ms, &error); +#endif /* CONFIG_DEBUG_FS */ + + if (ret) { + HWFNC_ERR("synx_id:%d failed to wait on fence h_synx:%u timeout_ms:%llu\n", + session->type, h_synx, timeout_ms); + return hw_fence_interop_to_synx_status(ret); + } + + return error ? error : SYNX_STATE_SIGNALED_SUCCESS; +} + int synx_hwfence_recover(enum synx_client_id id) { int ret; @@ -449,6 +482,7 @@ int synx_hwfence_init_ops(struct synx_ops *hwfence_ops) hwfence_ops->import = synx_hwfence_import; hwfence_ops->get_fence = synx_hwfence_get_fence; hwfence_ops->get_status = synx_hwfence_get_status; + hwfence_ops->wait = synx_hwfence_wait; return SYNX_SUCCESS; } From 16f16414206f7cd81c9093d0779720d55d4510e8 Mon Sep 17 00:00:00 2001 From: Grace An Date: Mon, 11 Dec 2023 15:44:10 -0800 Subject: [PATCH 111/166] mm-drivers: hw_fence: extend ioctl fence wait support Add support for test cases in which validation clients have multiple fences pending to be processed by fence wait. 
HW Fence Driver may process doorbell (on target with vm) or ipcc signal (on target with soccp) for multiple fences, or process signal after associated fence has already been processed by val client. Thus, val client must keep validation signal set if there are still fences pending processing by fence wait, and wait again (within timeout) if expected fence has yet to be received by validation client. Change-Id: Ifa285b3ebf10c825af26aa68dfb0af866ea0b2ed Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_debug.c | 65 +++++++++++++++++++------------ 1 file changed, 40 insertions(+), 25 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index c8cd80f482..249fdba4a6 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -1175,35 +1175,16 @@ int process_validation_client_loopback(struct hw_fence_driver_data *drv_data, return 0; } -int hw_fence_debug_wait_val(struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, - u64 hash, u64 timeout_ms, u32 *error) +static long _process_val_signal(struct msm_hw_fence_client *hw_fence_client, + struct dma_fence *fence, u64 hash, u32 *error) { - int ret, read = 1, queue_type = HW_FENCE_RX_QUEUE - 1; /* rx queue index */ struct msm_hw_fence_queue_payload payload; - ktime_t cur_ktime, exp_ktime; + int read = 1, queue_type = HW_FENCE_RX_QUEUE - 1; /* rx queue index */ u64 context, seqno; - if (!hw_fence_client) { - HWFNC_ERR("invalid client\n"); - return -EINVAL; - } - - exp_ktime = ktime_add_ms(ktime_get(), timeout_ms); - do { - ret = wait_event_timeout(hw_fence_client->wait_queue, - atomic_read(&hw_fence_client->val_signal) > 0, - msecs_to_jiffies(timeout_ms)); - cur_ktime = ktime_get(); - } while ((atomic_read(&hw_fence_client->val_signal) <= 0) && (ret == 0) && - ktime_compare_safe(exp_ktime, cur_ktime) > 0); - - if (!ret) { - HWFNC_ERR("timed out waiting for the client signal %llu\n", timeout_ms); - return -ETIMEDOUT; - } - - /* clear 
doorbell signal flag */ + /* clear validation signal flag */ atomic_set(&hw_fence_client->val_signal, 0); + context = fence ? fence->context : 0; seqno = fence ? fence->seqno : 0; @@ -1227,7 +1208,41 @@ int hw_fence_debug_wait_val(struct msm_hw_fence_client *hw_fence_client, struct HWFNC_ERR("received: hash:%llu ctx:%llu seq:%llu expected: hash:%llu ctx:%llu seq:%llu\n", payload.hash, payload.ctxt_id, payload.seqno, hash, context, seqno); - return read; + return -EINVAL; +} + +int hw_fence_debug_wait_val(struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, + u64 hash, u64 timeout_ms, u32 *error) +{ + ktime_t cur_ktime, exp_ktime; + int ret = -EINVAL; + + if (!hw_fence_client) { + HWFNC_ERR("invalid client\n"); + return -EINVAL; + } + + exp_ktime = ktime_add_ms(ktime_get(), timeout_ms); + while (ret) { + do { + ret = wait_event_timeout(hw_fence_client->wait_queue, + atomic_read(&hw_fence_client->val_signal) > 0, + msecs_to_jiffies(timeout_ms)); + cur_ktime = ktime_get(); + } while ((atomic_read(&hw_fence_client->val_signal) <= 0) && (ret == 0) && + ktime_compare_safe(exp_ktime, cur_ktime) > 0); + + if (!ret) { + HWFNC_ERR("timed out waiting for the client signal %llu\n", timeout_ms); + /* Decrement the refcount that hw_sync_get_fence increments */ + dma_fence_put(fence); + return -ETIMEDOUT; + } + ret = _process_val_signal(hw_fence_client, fence, hash, error); + /* if val client fails to find expected fence, keep waiting until timeout */ + } + + return ret; } static const struct file_operations hw_fence_reset_client_fops = { From 6ab144d3209ce92c034663d4d54d015a852ce7bf Mon Sep 17 00:00:00 2001 From: Grace An Date: Thu, 11 Jan 2024 13:06:46 -0800 Subject: [PATCH 112/166] mm-drivers: hw_fence: modify data structures to be 64-byte aligned SOCCP requires 64-byte alignment for cache memory operations. Modify all hw-fence data structures to follow 64-byte alignment on targets where SOCCP is present. 
This includes hw-fences, locks, queue table header, and queue header data structures. Change-Id: Ife51e9c7d3c10cce72411f3d9dce425a0fcd06fa Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_debug.h | 7 +- hw_fence/include/hw_fence_drv_priv.h | 57 ++++--- hw_fence/include/msm_hw_fence.h | 64 +++++++ hw_fence/src/hw_fence_drv_debug.c | 35 ++-- hw_fence/src/hw_fence_drv_priv.c | 166 ++++++++++--------- hw_fence/src/hw_fence_drv_utils.c | 26 +-- hw_fence/src/hw_fence_ioctl.c | 3 +- hw_fence/src/msm_hw_fence.c | 5 +- hw_fence/src/msm_hw_fence_synx_translation.c | 3 +- 9 files changed, 230 insertions(+), 136 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_debug.h b/hw_fence/include/hw_fence_drv_debug.h index 6cface7cc6..3b7e06bcf8 100644 --- a/hw_fence/include/hw_fence_drv_debug.h +++ b/hw_fence/include/hw_fence_drv_debug.h @@ -74,10 +74,11 @@ int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data); #if IS_ENABLED(CONFIG_DEBUG_FS) int process_validation_client_loopback(struct hw_fence_driver_data *drv_data, int client_id); -int hw_fence_debug_wait_val(struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, - u64 hash, u64 timeout_ms, u32 *error); +int hw_fence_debug_wait_val(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, u64 hash, + u64 timeout_ms, u32 *error); -void hw_fence_debug_dump_queues(enum hw_fence_drv_prio prio, +void hw_fence_debug_dump_queues(struct hw_fence_driver_data *drv_data, enum hw_fence_drv_prio prio, struct msm_hw_fence_client *hw_fence_client); void hw_fence_debug_dump_fence(enum hw_fence_drv_prio prio, struct msm_hw_fence *hw_fence, u64 hash, u32 count); diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index d0269d56ee..ff912a8cf5 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -28,27 +28,39 @@ #define HW_FENCE_CLIENT_QUEUES 2 /* Rx and Tx Queues */ /* 
hfi headers calculation */ -#define HW_FENCE_HFI_TABLE_HEADER_SIZE (sizeof(struct msm_hw_fence_hfi_queue_table_header)) -#define HW_FENCE_HFI_QUEUE_HEADER_SIZE (sizeof(struct msm_hw_fence_hfi_queue_header)) +#define HW_FENCE_HFI_TABLE_HEADER_SIZE(has_soccp) \ + ((has_soccp) ? (sizeof(struct msm_hw_fence_hfi_queue_table_header_v2)) : \ + (sizeof(struct msm_hw_fence_hfi_queue_table_header))) -#define HW_FENCE_HFI_CTRL_HEADERS_SIZE (HW_FENCE_HFI_TABLE_HEADER_SIZE + \ - (HW_FENCE_HFI_QUEUE_HEADER_SIZE * HW_FENCE_CTRL_QUEUES)) +#define HW_FENCE_HFI_QUEUE_HEADER_SIZE(has_soccp) \ + ((has_soccp) ? (sizeof(struct msm_hw_fence_hfi_queue_header_v2)) : \ + (sizeof(struct msm_hw_fence_hfi_queue_header))) -#define HW_FENCE_HFI_CLIENT_HEADERS_SIZE(queues_num) (HW_FENCE_HFI_TABLE_HEADER_SIZE + \ - (HW_FENCE_HFI_QUEUE_HEADER_SIZE * queues_num)) +#define HW_FENCE_HFI_CTRL_HEADERS_SIZE(has_soccp) (HW_FENCE_HFI_TABLE_HEADER_SIZE(has_soccp) + \ + (HW_FENCE_HFI_QUEUE_HEADER_SIZE(has_soccp) * HW_FENCE_CTRL_QUEUES)) + +#define HW_FENCE_HFI_CLIENT_HEADERS_SIZE(queues_num, has_soccp) \ + (HW_FENCE_HFI_TABLE_HEADER_SIZE(has_soccp) + \ + (HW_FENCE_HFI_QUEUE_HEADER_SIZE(has_soccp) * queues_num)) /* - * Max Payload size is the bigest size of the message that we can have in the CTRL queue - * in this case the max message is calculated like following, using 32-bits elements: - * 1 header + 1 msg-type + 1 client_id + 2 hash + 1 error + * CTRL queue uses same 64-byte aligned payload size as client queue. 
*/ -#define HW_FENCE_CTRL_QUEUE_MAX_PAYLOAD_SIZE ((1 + 1 + 1 + 2 + 1) * sizeof(u32)) +#define HW_FENCE_CTRL_QUEUE_MAX_PAYLOAD_SIZE (sizeof(struct msm_hw_fence_queue_payload)) #define HW_FENCE_CTRL_QUEUE_PAYLOAD HW_FENCE_CTRL_QUEUE_MAX_PAYLOAD_SIZE #define HW_FENCE_CLIENT_QUEUE_PAYLOAD (sizeof(struct msm_hw_fence_queue_payload)) +#define HW_FENCE_CTRL_QUEUE_ENTRIES 64 + +/* + * On targets with SOCCP, client RxQ lock is 64-bit in size but each lock is at a separate 64-byte + * chunk of memory + */ +#define HW_FENCE_LOCK_IDX_OFFSET 8 /* Locks area for all clients with RxQ */ -#define HW_FENCE_MEM_LOCKS_SIZE(rxq_clients_num) (sizeof(u64) * rxq_clients_num) +#define HW_FENCE_MEM_LOCKS_SIZE(rxq_clients_num) (HW_FENCE_LOCK_IDX_OFFSET * sizeof(u64) * \ + rxq_clients_num) #define HW_FENCE_TX_QUEUE 1 #define HW_FENCE_RX_QUEUE 2 @@ -129,23 +141,13 @@ enum hw_fence_lookup_ops { /** * enum hw_fence_client_data_id - Enum with the clients having client_data, an optional * parameter passed from the waiting client and returned - * to it upon fence signaling. Only the first HW Fence - * Client for non-VAL clients (e.g. GFX, IPE, VPU) have - * client_data. + * to it upon fence signaling. * @HW_FENCE_CLIENT_DATA_ID_CTX0: GFX Client 0. - * @HW_FENCE_CLIENT_DATA_ID_IPE: IPE Client 0. - * @HW_FENCE_CLIENT_DATA_ID_VPU: VPU Client 0. - * @HW_FENCE_CLIENT_DATA_ID_VAL0: Debug validation client 0. - * @HW_FENCE_CLIENT_DATA_ID_VAL1: Debug validation client 1. 
* @HW_FENCE_MAX_CLIENTS_WITH_DATA: Max number of clients with data, also indicates an * invalid hw_fence_client_data_id */ enum hw_fence_client_data_id { HW_FENCE_CLIENT_DATA_ID_CTX0, - HW_FENCE_CLIENT_DATA_ID_IPE, - HW_FENCE_CLIENT_DATA_ID_VPU, - HW_FENCE_CLIENT_DATA_ID_VAL0, - HW_FENCE_CLIENT_DATA_ID_VAL1, HW_FENCE_MAX_CLIENTS_WITH_DATA, }; @@ -603,10 +605,13 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, int hw_fence_update_existing_txq_payload(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 hash, u32 error); inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data); -int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, - struct msm_hw_fence_queue_payload *payload, int queue_type); -int hw_fence_read_queue_helper(struct msm_hw_fence_queue *queue, - struct msm_hw_fence_queue_payload *payload); +int hw_fence_read_queue(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence_queue_payload *payload, + int queue_type); +int hw_fence_read_queue_helper(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_queue *queue, struct msm_hw_fence_queue_payload *payload); +void hw_fence_get_queue_idx_ptrs(struct hw_fence_driver_data *drv_data, void *va_header, + u32 **rd_idx_ptr, u32 **wr_idx_ptr, u32 **tx_wm_ptr); int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, struct dma_fence *fence, struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno, u64 *hash, u64 client_data); diff --git a/hw_fence/include/msm_hw_fence.h b/hw_fence/include/msm_hw_fence.h index 026098c401..a3e096b5fa 100644 --- a/hw_fence/include/msm_hw_fence.h +++ b/hw_fence/include/msm_hw_fence.h @@ -97,6 +97,26 @@ struct msm_hw_fence_hfi_queue_table_header { u32 num_active_q; }; +/** + * struct msm_hw_fence_hfi_queue_table_header_v2 - Version 2 of HFI queue table structure. + * @version: HFI protocol version. 
+ * @size: Queue table size in dwords. + * @qhdr0_offset: First queue header offset (dwords) in this table. + * @qhdr_size: Queue header size. + * @num_q: Number of queues defined in this table. + * @num_active_q: Number of active queues. + * @reserved: reserved memory used for 64-byte alignment + */ +struct msm_hw_fence_hfi_queue_table_header_v2 { + u32 version; + u32 size; + u32 qhdr0_offset; + u32 qhdr_size; + u32 num_q; + u32 num_active_q; + u32 reserved[10]; +}; + /** * struct msm_hw_fence_hfi_queue_header - HFI queue header structure. * @status: Active = 1, Inactive = 0. @@ -135,6 +155,50 @@ struct msm_hw_fence_hfi_queue_header { u32 write_index; }; +/** + * struct msm_hw_fence_hfi_queue_header - HFI queue header structure. + * @status: Active = 1, Inactive = 0. + * @start_addr: Starting address of the queue. + * @type: Queue type (rx/tx). + * @queue_size: Size of the queue. + * @pkt_size: Size of the queue packet entries, + * 0 - means variable size of message in the queue, + * non-zero - size of the packet, fixed. + * @pkt_drop_cnt: Number of packets drop by sender. + * @rx_wm: Receiver watermark, applicable in event driven mode. + * @tx_wm: Sender watermark, applicable in event driven mode. + * @rx_req: Receiver sets this bit if queue is empty. + * @tx_req: Sender sets this bit if queue is full. + * @rx_irq_status: Receiver sets this bit and triggers an interrupt to the + * sender after packets are dequeued. Sender clears this bit. + * @tx_irq_status: Sender sets this bit and triggers an interrupt to the + * receiver after packets are queued. Receiver clears this bit. + * @init_reserved: reservation for 64-byte alignment of read and write indexes + * @read_index: read index of the queue. + * @read_index_reserved: reservation for 64-byte alignment of read and write indexes + * @write_index: write index of the queue. 
+ * @write_index_reserved: reservation for 64-byte alignment of read and write indexes + */ +struct msm_hw_fence_hfi_queue_header_v2 { + u32 status; + u32 start_addr; + u32 type; + u32 queue_size; + u32 pkt_size; + u32 pkt_drop_cnt; + u32 rx_wm; + u32 tx_wm; + u32 rx_req; + u32 tx_req; + u32 rx_irq_status; + u32 tx_irq_status; + u32 init_reserved[4]; + u32 read_index; + u32 read_index_reserved[15]; + u32 write_index; + u32 write_index_reserved[15]; +}; + /** * struct msm_hw_fence_mem_addr - Memory descriptor of the queue allocated by * the fence driver for each client during diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index 249fdba4a6..8d8c137c9a 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -762,14 +762,13 @@ exit: return len; } -static void _dump_queue(enum hw_fence_drv_prio prio, struct msm_hw_fence_client *hw_fence_client, - int queue_type) +static void _dump_queue(struct hw_fence_driver_data *drv_data, enum hw_fence_drv_prio prio, + struct msm_hw_fence_client *hw_fence_client, int queue_type) { struct msm_hw_fence_queue *queue; - struct msm_hw_fence_hfi_queue_header *hfi_header; struct msm_hw_fence_queue_payload *payload; u64 timestamp; - u32 *read_ptr, queue_entries; + u32 *read_ptr, *rd_idx_ptr, *wr_idx_ptr, *tx_wm_ptr, queue_entries; int i; queue = &hw_fence_client->queues[queue_type - 1]; @@ -782,13 +781,13 @@ static void _dump_queue(enum hw_fence_drv_prio prio, struct msm_hw_fence_client queue, queue ? queue->va_header : NULL, queue ? queue->va_queue : NULL); return; } - hfi_header = (struct msm_hw_fence_hfi_queue_header *)queue->va_header; + hw_fence_get_queue_idx_ptrs(drv_data, queue->va_header, &rd_idx_ptr, &wr_idx_ptr, + &tx_wm_ptr); mb(); /* make sure data is ready before read */ HWFNC_DBG_DUMP(prio, "%s va:0x%pK rd_idx:%u wr_idx:%u tx_wm:%u q_size_bytes:%u\n", (queue_type == HW_FENCE_TX_QUEUE) ? 
"TX QUEUE" : "RX QUEUE", queue->va_queue, - hfi_header->read_index, hfi_header->write_index, hfi_header->tx_wm, - queue->q_size_bytes); + *rd_idx_ptr, *wr_idx_ptr, *tx_wm_ptr, queue->q_size_bytes); queue_entries = queue->q_size_bytes / HW_FENCE_CLIENT_QUEUE_PAYLOAD; for (i = 0; i < queue_entries; i++) { @@ -805,7 +804,7 @@ static void _dump_queue(enum hw_fence_drv_prio prio, struct msm_hw_fence_client } } -void hw_fence_debug_dump_queues(enum hw_fence_drv_prio prio, +void hw_fence_debug_dump_queues(struct hw_fence_driver_data *drv_data, enum hw_fence_drv_prio prio, struct msm_hw_fence_client *hw_fence_client) { if (!hw_fence_client) { @@ -815,8 +814,8 @@ void hw_fence_debug_dump_queues(enum hw_fence_drv_prio prio, HWFNC_DBG_DUMP(prio, "Queues for client %d\n", hw_fence_client->client_id); if (hw_fence_client->queues_num == HW_FENCE_CLIENT_QUEUES) - _dump_queue(prio, hw_fence_client, HW_FENCE_RX_QUEUE); - _dump_queue(prio, hw_fence_client, HW_FENCE_TX_QUEUE); + _dump_queue(drv_data, prio, hw_fence_client, HW_FENCE_RX_QUEUE); + _dump_queue(drv_data, prio, hw_fence_client, HW_FENCE_TX_QUEUE); } /** @@ -850,7 +849,7 @@ static ssize_t hw_fence_dbg_dump_queues_wr(struct file *file, const char __user HWFNC_ERR("client %d not initialized\n", client_id); return -EINVAL; } - hw_fence_debug_dump_queues(HW_FENCE_PRINTK, drv_data->clients[client_id]); + hw_fence_debug_dump_queues(drv_data, HW_FENCE_PRINTK, drv_data->clients[client_id]); return count; } @@ -1175,7 +1174,8 @@ int process_validation_client_loopback(struct hw_fence_driver_data *drv_data, return 0; } -static long _process_val_signal(struct msm_hw_fence_client *hw_fence_client, +static long _process_val_signal(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, u64 hash, u32 *error) { struct msm_hw_fence_queue_payload payload; @@ -1189,7 +1189,7 @@ static long _process_val_signal(struct msm_hw_fence_client *hw_fence_client, seqno = fence ? 
fence->seqno : 0; while (read) { - read = hw_fence_read_queue(hw_fence_client, &payload, queue_type); + read = hw_fence_read_queue(drv_data, hw_fence_client, &payload, queue_type); if (read < 0) { HWFNC_ERR("unable to read client rxq client_id:%u\n", hw_fence_client->client_id); @@ -1211,13 +1211,14 @@ static long _process_val_signal(struct msm_hw_fence_client *hw_fence_client, return -EINVAL; } -int hw_fence_debug_wait_val(struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, - u64 hash, u64 timeout_ms, u32 *error) +int hw_fence_debug_wait_val(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, u64 hash, + u64 timeout_ms, u32 *error) { ktime_t cur_ktime, exp_ktime; int ret = -EINVAL; - if (!hw_fence_client) { + if (!hw_fence_client || !drv_data) { HWFNC_ERR("invalid client\n"); return -EINVAL; } @@ -1238,7 +1239,7 @@ int hw_fence_debug_wait_val(struct msm_hw_fence_client *hw_fence_client, struct dma_fence_put(fence); return -ETIMEDOUT; } - ret = _process_val_signal(hw_fence_client, fence, hash, error); + ret = _process_val_signal(drv_data, hw_fence_client, fence, hash, error); /* if val client fails to find expected fence, keep waiting until timeout */ } diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 89180db90d..d203d4abff 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -39,6 +39,29 @@ inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data) #endif /* HWFENCE_USE_SLEEP_TIMER */ } +/* on targets with soccp, read_index and write_index etc. 
fields are in different locations */ +void hw_fence_get_queue_idx_ptrs(struct hw_fence_driver_data *drv_data, void *va_header, + u32 **rd_idx_ptr, u32 **wr_idx_ptr, u32 **tx_wm_ptr) +{ + struct msm_hw_fence_hfi_queue_header *hfi_header; + struct msm_hw_fence_hfi_queue_header_v2 *hfi_header_v2; + + /* if soccp is present, use v2 header data structures */ + if (drv_data->has_soccp) { + hfi_header_v2 = va_header; + *rd_idx_ptr = &hfi_header_v2->read_index; + *wr_idx_ptr = &hfi_header_v2->write_index; + if (tx_wm_ptr) + *tx_wm_ptr = &hfi_header_v2->tx_wm; + } else { + hfi_header = va_header; + *rd_idx_ptr = &hfi_header->read_index; + *wr_idx_ptr = &hfi_header->write_index; + if (tx_wm_ptr) + *tx_wm_ptr = &hfi_header->tx_wm; + } +} + static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, enum hw_fence_mem_reserve mem_reserve_id, struct msm_hw_fence_mem_addr *mem_descriptor, @@ -51,6 +74,7 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, void *ptr, *qptr; phys_addr_t phys, qphys; u32 size, start_queue_offset, txq_idx_start = 0, txq_idx_factor = 1; + u32 *wr_idx_ptr, *rd_idx_ptr, *tx_wm_ptr; int headers_size, queue_size, payload_size; int start_padding = 0, end_padding = 0; int i, ret = 0; @@ -59,7 +83,7 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, HWFNC_DBG_INIT("mem_reserve_id:%d client_id:%d\n", mem_reserve_id, client_id); switch (mem_reserve_id) { case HW_FENCE_MEM_RESERVE_CTRL_QUEUE: - headers_size = HW_FENCE_HFI_CTRL_HEADERS_SIZE; + headers_size = HW_FENCE_HFI_CTRL_HEADERS_SIZE(drv_data->has_soccp); queue_size = drv_data->hw_fence_ctrl_queue_size; payload_size = HW_FENCE_CTRL_QUEUE_PAYLOAD; break; @@ -74,8 +98,8 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, desc = drv_data->hw_fence_client_queue_size[client_id].type; start_padding = desc->start_padding; end_padding = desc->end_padding; - headers_size = HW_FENCE_HFI_CLIENT_HEADERS_SIZE(queues_num) + start_padding + - 
end_padding; + headers_size = HW_FENCE_HFI_CLIENT_HEADERS_SIZE(queues_num, drv_data->has_soccp) + + start_padding + end_padding; queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * desc->queue_entries; payload_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD; txq_idx_start = desc->txq_idx_start; @@ -108,8 +132,9 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, hfi_table_header->version = 0; hfi_table_header->size = size; /* bytes */ /* Offset, from the Base Address, where the first queue header starts */ - hfi_table_header->qhdr0_offset = HW_FENCE_HFI_TABLE_HEADER_SIZE + start_padding; - hfi_table_header->qhdr_size = HW_FENCE_HFI_QUEUE_HEADER_SIZE; + hfi_table_header->qhdr0_offset = HW_FENCE_HFI_TABLE_HEADER_SIZE(drv_data->has_soccp) + + start_padding; + hfi_table_header->qhdr_size = HW_FENCE_HFI_QUEUE_HEADER_SIZE(drv_data->has_soccp); hfi_table_header->num_q = queues_num; /* number of queues */ hfi_table_header->num_active_q = queues_num; @@ -143,12 +168,15 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, /* Set the payload size */ hfi_queue_header->pkt_size = payload_size; + hw_fence_get_queue_idx_ptrs(drv_data, hfi_queue_header, &rd_idx_ptr, &wr_idx_ptr, + &tx_wm_ptr); + /* Set write index for clients' tx queues that index from nonzero value */ - if (txq_idx_start && IS_HW_FENCE_TX_QUEUE(i) && !hfi_queue_header->write_index) { + if (txq_idx_start && IS_HW_FENCE_TX_QUEUE(i) && !*wr_idx_ptr) { if (skip_txq_wr_idx) - hfi_queue_header->tx_wm = txq_idx_start; - hfi_queue_header->read_index = txq_idx_start; - hfi_queue_header->write_index = txq_idx_start; + *tx_wm_ptr = txq_idx_start; + *rd_idx_ptr = txq_idx_start; + *wr_idx_ptr = txq_idx_start; HWFNC_DBG_INIT("init:TX_QUEUE client:%d rd_idx=%s=%u\n", client_id, skip_txq_wr_idx ? "wr_idx=tx_wm" : "wr_idx", txq_idx_start); @@ -180,7 +208,8 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, queues[i].skip_wr_idx ? 
"true" : "false"); /* Next header */ - hfi_queue_header++; + hfi_queue_header = (struct msm_hw_fence_hfi_queue_header *) + ((char *)hfi_queue_header + hfi_table_header->qhdr_size); } return ret; @@ -208,8 +237,9 @@ static void _translate_queue_indexes_custom_to_default(struct msm_hw_fence_queue } } -int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, - struct msm_hw_fence_queue_payload *payload, int queue_type) +int hw_fence_read_queue(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence_queue_payload *payload, + int queue_type) { struct msm_hw_fence_queue *queue; @@ -222,41 +252,40 @@ int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, queue = &hw_fence_client->queues[queue_type]; HWFNC_DBG_Q("read client:%d queue:0x%pK\n", hw_fence_client->client_id, queue); - return hw_fence_read_queue_helper(queue, payload); + return hw_fence_read_queue_helper(drv_data, queue, payload); } -int hw_fence_read_queue_helper(struct msm_hw_fence_queue *queue, - struct msm_hw_fence_queue_payload *payload) +int hw_fence_read_queue_helper(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_queue *queue, struct msm_hw_fence_queue_payload *payload) { - struct msm_hw_fence_hfi_queue_header *hfi_header; u32 read_idx, write_idx, to_read_idx; - u32 *read_ptr; + u32 *read_ptr, *rd_idx_ptr, *wr_idx_ptr; u32 payload_size_u32, q_size_u32; struct msm_hw_fence_queue_payload *read_ptr_payload; - hfi_header = queue->va_header; - q_size_u32 = (queue->q_size_bytes / sizeof(u32)); payload_size_u32 = (sizeof(struct msm_hw_fence_queue_payload) / sizeof(u32)); HWFNC_DBG_Q("sizeof payload:%lu\n", sizeof(struct msm_hw_fence_queue_payload)); - if (!hfi_header || !payload) { + if (!queue->va_header || !payload) { HWFNC_ERR("Invalid queue\n"); return -EINVAL; } + hw_fence_get_queue_idx_ptrs(drv_data, queue->va_header, &rd_idx_ptr, &wr_idx_ptr, NULL); + /* Make sure data is ready before read */ mb(); /* Get read and 
write index */ - read_idx = readl_relaxed(&hfi_header->read_index); - write_idx = readl_relaxed(&hfi_header->write_index); + read_idx = readl_relaxed(rd_idx_ptr); + write_idx = readl_relaxed(wr_idx_ptr); /* translate read and write indexes from custom indexing to dwords with no offset */ _translate_queue_indexes_custom_to_default(queue, &read_idx, &write_idx); HWFNC_DBG_Q("read rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d queue:0x%pK\n", - &hfi_header->read_index, &hfi_header->write_index, read_idx, write_idx, queue); + rd_idx_ptr, wr_idx_ptr, read_idx, write_idx, queue); if (read_idx == write_idx) { HWFNC_DBG_Q("Nothing to read!\n"); @@ -291,7 +320,7 @@ int hw_fence_read_queue_helper(struct msm_hw_fence_queue *queue, *payload = *read_ptr_payload; /* update the read index */ - writel_relaxed(to_read_idx, &hfi_header->read_index); + writel_relaxed(to_read_idx, rd_idx_ptr); /* update memory for the index */ wmb(); @@ -300,18 +329,14 @@ int hw_fence_read_queue_helper(struct msm_hw_fence_queue *queue, return to_read_idx == write_idx ? 
0 : 1; } -static int _get_update_queue_params(struct msm_hw_fence_queue *queue, - struct msm_hw_fence_hfi_queue_header **hfi_header, u32 *q_size_u32, u32 *payload_size, - u32 *payload_size_u32, u32 **wr_ptr) +static int _get_update_queue_params(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_queue *queue, u32 *q_size_u32, u32 *payload_size, + u32 *payload_size_u32, u32 **rd_idx_ptr, u32 **wr_ptr) { - if (!queue) { - HWFNC_ERR("invalid queue\n"); - return -EINVAL; - } + u32 *tx_wm_ptr; - *hfi_header = queue->va_header; - if (!*hfi_header) { - HWFNC_ERR("Invalid queue hfi_header\n"); + if (!queue || !queue->va_header) { + HWFNC_ERR("invalid queue\n"); return -EINVAL; } @@ -319,11 +344,11 @@ static int _get_update_queue_params(struct msm_hw_fence_queue *queue, *payload_size = sizeof(struct msm_hw_fence_queue_payload); *payload_size_u32 = (*payload_size / sizeof(u32)); + hw_fence_get_queue_idx_ptrs(drv_data, queue->va_header, rd_idx_ptr, wr_ptr, &tx_wm_ptr); + /* if skipping update wr_index, then use hfi_header->tx_wm instead */ if (queue->skip_wr_idx) - *wr_ptr = &((*hfi_header)->tx_wm); - else - *wr_ptr = &((*hfi_header)->write_index); + *wr_ptr = tx_wm_ptr; return 0; } @@ -336,7 +361,6 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, u64 seqno, u64 hash, u64 flags, u64 client_data, u32 error, int queue_type) { - struct msm_hw_fence_hfi_queue_header *hfi_header; struct msm_hw_fence_queue *queue; u32 read_idx; u32 write_idx; @@ -349,7 +373,7 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, bool lock_client = false; u32 lock_idx; u64 timestamp; - u32 *wr_ptr; + u32 *rd_idx_ptr, *wr_ptr; int ret = 0; if (queue_type >= hw_fence_client->queues_num) { @@ -359,8 +383,8 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, } queue = &hw_fence_client->queues[queue_type]; - if (_get_update_queue_params(queue, &hfi_header, &q_size_u32, &payload_size, - 
&payload_size_u32, &wr_ptr)) { + if (_get_update_queue_params(drv_data, queue, &q_size_u32, &payload_size, + &payload_size_u32, &rd_idx_ptr, &wr_ptr)) { HWFNC_ERR("Invalid client:%d q_type:%d queue\n", hw_fence_client->client_id, queue_type); return -EINVAL; @@ -373,11 +397,12 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, */ lock_client = _lock_client_queue(queue_type); if (lock_client) { - lock_idx = hw_fence_client->client_id - 1; + lock_idx = (hw_fence_client->client_id - 1) * HW_FENCE_LOCK_IDX_OFFSET; if (lock_idx >= drv_data->client_lock_tbl_cnt) { - HWFNC_ERR("lock for client id:%d exceed max:%d\n", - hw_fence_client->client_id, drv_data->client_lock_tbl_cnt); + HWFNC_ERR("can't reset rxq, lock for client:%d lock_idx:%d exceed max:%d\n", + hw_fence_client->client_id, lock_idx, + drv_data->client_lock_tbl_cnt); return -EINVAL; } HWFNC_DBG_Q("Locking client id:%d: idx:%d\n", hw_fence_client->client_id, lock_idx); @@ -390,12 +415,12 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, mb(); /* Get read and write index */ - read_idx = readl_relaxed(&hfi_header->read_index); + read_idx = readl_relaxed(rd_idx_ptr); write_idx = readl_relaxed(wr_ptr); HWFNC_DBG_Q("wr client:%d r_ptr:0x%pK w_ptr:0x%pK r_idx:%d w_idx:%d q:0x%pK type:%d s:%s\n", - hw_fence_client->client_id, &hfi_header->read_index, wr_ptr, - read_idx, write_idx, queue, queue_type, queue->skip_wr_idx ? "true" : "false"); + hw_fence_client->client_id, rd_idx_ptr, wr_ptr, read_idx, write_idx, queue, + queue_type, queue->skip_wr_idx ? 
"true" : "false"); /* translate read and write indexes from custom indexing to dwords with no offset */ _translate_queue_indexes_custom_to_default(queue, &read_idx, &write_idx); @@ -473,15 +498,15 @@ exit: int hw_fence_update_existing_txq_payload(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 hash, u32 error) { - u32 q_size_u32, payload_size, payload_size_u32, read_idx, write_idx, second_idx, *wr_ptr; + u32 q_size_u32, payload_size, payload_size_u32, read_idx, write_idx, second_idx; struct msm_hw_fence_queue_payload tmp, *first_payload, *second_payload; - struct msm_hw_fence_hfi_queue_header *hfi_header; struct msm_hw_fence_queue *queue; + u32 *rd_idx_ptr, *wr_ptr; int ret = 0; queue = &hw_fence_client->queues[HW_FENCE_TX_QUEUE - 1]; - if (_get_update_queue_params(queue, &hfi_header, &q_size_u32, &payload_size, - &payload_size_u32, &wr_ptr)) { + if (_get_update_queue_params(drv_data, queue, &q_size_u32, &payload_size, + &payload_size_u32, &rd_idx_ptr, &wr_ptr)) { HWFNC_ERR("Invalid client:%d tx queue\n", hw_fence_client->client_id); return -EINVAL; } @@ -490,7 +515,7 @@ int hw_fence_update_existing_txq_payload(struct hw_fence_driver_data *drv_data, mb(); /* Get read and write index */ - read_idx = hfi_header->read_index; + read_idx = *rd_idx_ptr; write_idx = *wr_ptr; /* translate read and write indexes from custom indexing to dwords with no offset */ @@ -2195,30 +2220,32 @@ struct msm_hw_fence *hw_fence_find_with_dma_fence(struct hw_fence_driver_data *d void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client) { - struct msm_hw_fence_hfi_queue_header *hfi_header; struct msm_hw_fence_queue *queue; u32 rd_idx, wr_idx, lock_idx; + u32 *rd_idx_ptr, *wr_idx_ptr, *tx_wm_ptr; queue = &hw_fence_client->queues[HW_FENCE_TX_QUEUE - 1]; - hfi_header = queue->va_header; + hw_fence_get_queue_idx_ptrs(drv_data, queue->va_header, &rd_idx_ptr, &wr_idx_ptr, + &tx_wm_ptr); /* For the 
client TxQ: set the read-index same as last write that was done by the client */ mb(); /* make sure data is ready before read */ - wr_idx = readl_relaxed(&hfi_header->write_index); + wr_idx = readl_relaxed(wr_idx_ptr); if (queue->skip_wr_idx) - hfi_header->tx_wm = wr_idx; - writel_relaxed(wr_idx, &hfi_header->read_index); + *tx_wm_ptr = wr_idx; + writel_relaxed(wr_idx, rd_idx_ptr); wmb(); /* make sure data is updated after write the index*/ HWFNC_DBG_Q("update tx queue %s to match write_index:%u\n", queue->skip_wr_idx ? "read_index=tx_wm" : "read_index", wr_idx); /* For the client RxQ: set the write-index same as last read done by the client */ if (hw_fence_client->update_rxq) { - lock_idx = hw_fence_client->client_id - 1; + lock_idx = (hw_fence_client->client_id - 1) * HW_FENCE_LOCK_IDX_OFFSET; if (lock_idx >= drv_data->client_lock_tbl_cnt) { - HWFNC_ERR("cannot reset rxq, lock for client id:%d exceed max:%d\n", - hw_fence_client->client_id, drv_data->client_lock_tbl_cnt); + HWFNC_ERR("can't reset rxq, lock for client:%d lock_idx:%d exceed max:%d\n", + hw_fence_client->client_id, lock_idx, + drv_data->client_lock_tbl_cnt); return; } HWFNC_DBG_Q("Locking client id:%d: idx:%d\n", hw_fence_client->client_id, lock_idx); @@ -2227,11 +2254,12 @@ void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data, GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 1); queue = &hw_fence_client->queues[HW_FENCE_RX_QUEUE - 1]; - hfi_header = queue->va_header; + hw_fence_get_queue_idx_ptrs(drv_data, queue->va_header, &rd_idx_ptr, &wr_idx_ptr, + &tx_wm_ptr); mb(); /* make sure data is ready before read */ - rd_idx = readl_relaxed(&hfi_header->read_index); - writel_relaxed(rd_idx, &hfi_header->write_index); + rd_idx = readl_relaxed(rd_idx_ptr); + writel_relaxed(rd_idx, wr_idx_ptr); wmb(); /* make sure data is updated after write the index */ /* unlock */ @@ -2289,18 +2317,6 @@ enum hw_fence_client_data_id hw_fence_get_client_data_id(enum hw_fence_client_id 
case HW_FENCE_CLIENT_ID_CTX0: data_id = HW_FENCE_CLIENT_DATA_ID_CTX0; break; - case HW_FENCE_CLIENT_ID_VAL0: - data_id = HW_FENCE_CLIENT_DATA_ID_VAL0; - break; - case HW_FENCE_CLIENT_ID_VAL1: - data_id = HW_FENCE_CLIENT_DATA_ID_VAL1; - break; - case HW_FENCE_CLIENT_ID_IPE: - data_id = HW_FENCE_CLIENT_DATA_ID_IPE; - break; - case HW_FENCE_CLIENT_ID_VPU: - data_id = HW_FENCE_CLIENT_DATA_ID_VPU; - break; default: data_id = HW_FENCE_MAX_CLIENTS_WITH_DATA; break; diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 814220105a..5d27852272 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -300,8 +300,8 @@ static int _process_fence_error_client_loopback(struct hw_fence_driver_data *drv u32 client_id; for (i = 0; read && i < HW_FENCE_MAX_ITER_READ; i++) { - read = hw_fence_read_queue_helper(&drv_data->ctrl_queues[HW_FENCE_RX_QUEUE - 1], - &payload); + read = hw_fence_read_queue_helper(drv_data, + &drv_data->ctrl_queues[HW_FENCE_RX_QUEUE - 1], &payload); if (read < 0) { HWFNC_DBG_Q("unable to read ctrl rxq for db_flag_id:%d\n", db_flag_id); return read; @@ -917,15 +917,16 @@ static int _parse_client_queue_dt_props_extra(struct hw_fence_driver_data *drv_d goto exit; } - if (desc->start_padding >= U32_MAX - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num)) { + if (desc->start_padding >= U32_MAX - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num, + drv_data->has_soccp)) { HWFNC_ERR("%s client queues_num:%u start_padding:%u will overflow mem_size\n", desc->name, desc->queues_num, desc->start_padding); ret = -EINVAL; goto exit; } - if (desc->end_padding >= U32_MAX - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) - - desc->start_padding) { + if (desc->end_padding >= U32_MAX - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num, + drv_data->has_soccp) - desc->start_padding) { HWFNC_ERR("%s client q_num:%u start_p:%u end_p:%u will overflow mem_size\n", desc->name, desc->queues_num, desc->start_padding, 
desc->end_padding); ret = -EINVAL; @@ -1000,15 +1001,16 @@ static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_da queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * desc->queue_entries; if (queue_size >= ((U32_MAX & PAGE_MASK) - - (HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) + + (HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num, drv_data->has_soccp) + desc->start_padding + desc->end_padding)) / desc->queues_num) { HWFNC_ERR("%s client queue_sz:%u start_p:%u end_p:%u will overflow mem size\n", desc->name, queue_size, desc->start_padding, desc->end_padding); return -EINVAL; } - desc->mem_size = PAGE_ALIGN(HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) + - (queue_size * desc->queues_num) + desc->start_padding + desc->end_padding); + desc->mem_size = PAGE_ALIGN(HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num, + drv_data->has_soccp) + (queue_size * desc->queues_num) + desc->start_padding + + desc->end_padding); if (desc->mem_size > MAX_CLIENT_QUEUE_MEM_SIZE) { HWFNC_ERR("%s client queue mem_size:%u greater than max mem size:%d\n", @@ -1120,15 +1122,17 @@ int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data) return -EINVAL; } drv_data->hw_fence_ctrl_queue_size = HW_FENCE_CTRL_QUEUE_PAYLOAD * - drv_data->hw_fence_queue_entries; + HW_FENCE_CTRL_QUEUE_ENTRIES; - if (drv_data->hw_fence_ctrl_queue_size >= (U32_MAX - HW_FENCE_HFI_CTRL_HEADERS_SIZE) / + if (drv_data->hw_fence_ctrl_queue_size >= (U32_MAX - + HW_FENCE_HFI_CTRL_HEADERS_SIZE(drv_data->has_soccp)) / HW_FENCE_CTRL_QUEUES) { HWFNC_ERR("queue size:%u will overflow ctrl queue mem size\n", drv_data->hw_fence_ctrl_queue_size); return -EINVAL; } - drv_data->hw_fence_mem_ctrl_queues_size = HW_FENCE_HFI_CTRL_HEADERS_SIZE + + drv_data->hw_fence_mem_ctrl_queues_size = + HW_FENCE_HFI_CTRL_HEADERS_SIZE(drv_data->has_soccp) + (HW_FENCE_CTRL_QUEUES * drv_data->hw_fence_ctrl_queue_size); /* clients queues init */ diff --git a/hw_fence/src/hw_fence_ioctl.c 
b/hw_fence/src/hw_fence_ioctl.c index c01ce37431..507d4a3761 100644 --- a/hw_fence/src/hw_fence_ioctl.c +++ b/hw_fence/src/hw_fence_ioctl.c @@ -506,7 +506,8 @@ static long hw_sync_ioctl_fence_wait(struct hw_sync_obj *obj, unsigned long arg) return -EINVAL; } - ret = hw_fence_debug_wait_val(hw_fence_client, fence, 0, data.timeout_ms, &error); + ret = hw_fence_debug_wait_val(hw_fence_drv_data, hw_fence_client, fence, 0, + data.timeout_ms, &error); if (ret) HWFNC_ERR("failed to wait for hw-fence client:%d ctx:%llu seq:%llu\n", hw_fence_client->client_id, fence->context, fence->seqno); diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 9ae8f4df25..95c209adb0 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -659,13 +659,14 @@ int msm_hw_fence_dump_debug_data(void *client_handle, u32 dump_flags, u32 dump_c hw_fence_client = (struct msm_hw_fence_client *)client_handle; if (dump_flags & MSM_HW_FENCE_DBG_DUMP_QUEUES) { - hw_fence_debug_dump_queues(HW_FENCE_PRINTK, hw_fence_client); + hw_fence_debug_dump_queues(hw_fence_drv_data, HW_FENCE_PRINTK, hw_fence_client); if (dump_clients_mask) for (client_id = 0; client_id < HW_FENCE_CLIENT_MAX; client_id++) if ((dump_clients_mask & (1 << client_id)) && hw_fence_drv_data->clients[client_id]) - hw_fence_debug_dump_queues(HW_FENCE_PRINTK, + hw_fence_debug_dump_queues(hw_fence_drv_data, + HW_FENCE_PRINTK, hw_fence_drv_data->clients[client_id]); } diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c index 87a0991e3c..1771b40f36 100644 --- a/hw_fence/src/msm_hw_fence_synx_translation.c +++ b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -249,7 +249,8 @@ static int synx_hwfence_wait(struct synx_session *session, u32 h_synx, u64 timeo h_synx &= HW_FENCE_HANDLE_INDEX_MASK; if (session->type >= SYNX_CLIENT_HW_FENCE_TEST_CTX0 && session->type <= SYNX_CLIENT_HW_FENCE_TEST_CTX0 + MAX_SUPPORTED_TEST) - ret = 
hw_fence_debug_wait_val(session->client, NULL, h_synx, timeout_ms, &error); + ret = hw_fence_debug_wait_val(hw_fence_drv_data, session->client, NULL, h_synx, + timeout_ms, &error); #endif /* CONFIG_DEBUG_FS */ if (ret) { From 624f8cca749c03fe5435e1c942a01ebabb895e49 Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 17 Jan 2024 15:57:54 -0800 Subject: [PATCH 113/166] mm-drivers: hw_fence: update client rx queue for dma-fence signal Some clients like GFX do not want client rxq to be updated when registering for wait on already signaled fences, but client rxq should be updated when the fence is signaled to client through other scenarios, e.g. synx_import or msm_hw_fence_signal. Add separate properties to control these two scenarios. Change-Id: Id4b9abe8db0a6b0893d6afa276d2ee133a088cb9 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_ipc.h | 11 ++ hw_fence/include/hw_fence_drv_priv.h | 6 +- hw_fence/src/hw_fence_drv_ipc.c | 255 +++++++++++++++++---------- hw_fence/src/hw_fence_drv_priv.c | 13 +- hw_fence/src/msm_hw_fence.c | 12 +- 5 files changed, 196 insertions(+), 101 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_ipc.h b/hw_fence/include/hw_fence_drv_ipc.h index 776f9e013a..318c25e6a5 100644 --- a/hw_fence/include/hw_fence_drv_ipc.h +++ b/hw_fence/include/hw_fence_drv_ipc.h @@ -154,6 +154,17 @@ int hw_fence_ipcc_get_signal_id(struct hw_fence_driver_data *drv_data, u32 clien */ bool hw_fence_ipcc_needs_rxq_update(struct hw_fence_driver_data *drv_data, int client_id); +/** + * hw_fence_ipcc_signaled_needs_rxq_update() - Returns bool to indicate if client requires + * rx-queue update when registering to wait on an already signaled fence. + * @drv_data: driver data. + * @client_id: hw fence driver client id. 
+ * + * Return: true if client needs to update rxq when dma-fence is signaled, false otherwise + */ +bool hw_fence_ipcc_signaled_needs_rxq_update(struct hw_fence_driver_data *drv_data, + int client_id); + /** * hw_fence_ipcc_signaled_needs_ipc_irq() - Returns bool to indicate if client needs ipc interrupt * for already signaled fences diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index d0269d56ee..75a79d685e 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -195,7 +195,10 @@ enum payload_type { * @ipc_signal_id: id of the signal to be triggered for this client * @ipc_client_vid: virtual id of the ipc client for this hw fence driver client * @ipc_client_pid: physical id of the ipc client for this hw fence driver client - * @update_rxq: bool to indicate if client uses rx-queue + * @update_rxq: bool to indicate if client requires rx queue update in general signal case + * (e.g. if dma-fence is signaled) + * @signaled_update_rxq: bool to indicate if client requires rx queue update when registering to + * wait on an already signaled fence * @signaled_send_ipc: bool to indicate if client requires ipc interrupt for already signaled fences * @txq_update_send_ipc: bool to indicate if client requires ipc interrupt for txq updates * @context_id: context id for fences created internally @@ -216,6 +219,7 @@ struct msm_hw_fence_client { int ipc_client_vid; int ipc_client_pid; bool update_rxq; + bool signaled_update_rxq; bool signaled_send_ipc; bool txq_update_send_ipc; u64 context_id; diff --git a/hw_fence/src/hw_fence_drv_ipc.c b/hw_fence/src/hw_fence_drv_ipc.c index 291220b14d..045cf7b67a 100644 --- a/hw_fence/src/hw_fence_drv_ipc.c +++ b/hw_fence/src/hw_fence_drv_ipc.c @@ -27,14 +27,19 @@ * @ipc_client_id_virt: virtual ipc client id for the hw-fence client. * @ipc_client_id_phys: physical ipc client id for the hw-fence client. * @ipc_signal_id: ipc signal id for the hw-fence client. 
- * @update_rxq: bool to indicate if clinet uses rx-queue. - * @send_ipc: bool to indicate if client requires ipc interrupt for signaled fences + * @update_rxq: bool to indicate if client requires rx queue update in general signal case + * (e.g. if dma-fence is signaled) + * @signaled_update_rxq: bool to indicate if client requires rx queue update when registering to + * wait on an already signaled fence + * @signaled_send_ipc: bool to indicate if client requires ipc interrupt for already signaled fences + * @txq_update_send_ipc: bool to indicate if client requires ipc interrupt for signaled fences */ struct hw_fence_client_ipc_map { int ipc_client_id_virt; int ipc_client_id_phys; int ipc_signal_id; bool update_rxq; + bool signaled_update_rxq; bool signaled_send_ipc; bool txq_update_send_ipc; }; @@ -48,33 +53,50 @@ struct hw_fence_client_ipc_map { * Note that the index of this struct must match the enum hw_fence_client_id */ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_IPC_MAP_MAX] = { - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 1, true, true, false}, - {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0, false, false, true}, - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 0, false, true, false}, - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 1, false, true, false}, - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 2, false, true, false}, - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 3, false, true, false}, - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 4, false, true, false}, - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 5, false, true, false}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 1, true, true, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0, true, false, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, 
HW_FENCE_IPC_CLIENT_ID_DPU_VID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 1, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 2, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 3, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 4, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 5, false, false, true, + false}, #if IS_ENABLED(CONFIG_DEBUG_FS) - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 21, true, false, true}, - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 22, true, false, true}, - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 23, true, false, true}, - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 24, true, false, true}, - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25, true, false, true}, - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26, true, false, true}, - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27, true, false, true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 21, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 22, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 23, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 24, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27, true, true, false, + true}, #else - {0, 0, 0, false, false, false}, /* 
val0 */ - {0, 0, 0, false, false, false}, /* val1 */ - {0, 0, 0, false, false, false}, /* val2 */ - {0, 0, 0, false, false, false}, /* val3 */ - {0, 0, 0, false, false, false}, /* val4 */ - {0, 0, 0, false, false, false}, /* val5 */ - {0, 0, 0, false, false, false}, /* val6 */ + {0, 0, 0, false, false, false, false}, /* val0 */ + {0, 0, 0, false, false, false, false}, /* val1 */ + {0, 0, 0, false, false, false, false}, /* val2 */ + {0, 0, 0, false, false, false, false}, /* val3 */ + {0, 0, 0, false, false, false, false}, /* val4 */ + {0, 0, 0, false, false, false, false}, /* val5 */ + {0, 0, 0, false, false, false, false}, /* val6 */ #endif /* CONFIG_DEBUG_FS */ - {HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_VID, 0, true, true, false}, - {HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_VID, 0, true, true, false}, + {HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_VID, 0, true, true, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_VID, 0, true, true, true, + false}, }; /** @@ -89,41 +111,66 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_IPC_MAP_MAX] = * HW_FENCE_MAX_STATIC_CLIENTS_INDEX + (client type index - HW_FENCE_MAX_CLIENT_TYPE_STATIC). 
*/ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_v2[HW_FENCE_IPC_MAP_MAX] = { - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 1, true, true, false}, - {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_PID, 0, false, false, true}, - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 0, false, true, false}, - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 1, false, true, false}, - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 2, false, true, false}, - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 3, false, true, false}, - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 4, false, true, false}, - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 5, false, true, false}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 1, true, true, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_PID, 0, true, false, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 1, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 2, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 3, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 4, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 5, false, false, true, + false}, #if IS_ENABLED(CONFIG_DEBUG_FS) - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 21, true, false, true}, - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 22, true, false, true}, - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 23, true, false, true}, - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 24, 
true, false, true}, - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 25, true, false, true}, - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 26, true, false, true}, - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 27, true, false, true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 21, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 22, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 23, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 24, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 25, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 26, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 27, true, true, false, + true}, #else - {0, 0, 0, false, false, false}, /* val0 */ - {0, 0, 0, false, false, false}, /* val1 */ - {0, 0, 0, false, false, false}, /* val2 */ - {0, 0, 0, false, false, false}, /* val3 */ - {0, 0, 0, false, false, false}, /* val4 */ - {0, 0, 0, false, false, false}, /* val5 */ - {0, 0, 0, false, false, false}, /* val6 */ + {0, 0, 0, false, false, false, false}, /* val0 */ + {0, 0, 0, false, false, false, false}, /* val1 */ + {0, 0, 0, false, false, false, false}, /* val2 */ + {0, 0, 0, false, false, false, false}, /* val3 */ + {0, 0, 0, false, false, false, false}, /* val4 */ + {0, 0, 0, false, false, false, false}, /* val5 */ + {0, 0, 0, false, false, false, false}, /* val6 */ #endif /* CONFIG_DEBUG_FS */ - {HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_PID, 0, true, true, false}, - {HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_PID, 0, true, true, false}, - {HW_FENCE_IPC_CLIENT_ID_IFE0_VID, HW_FENCE_IPC_CLIENT_ID_IFE0_PID, 0, false, true, false}, - 
{HW_FENCE_IPC_CLIENT_ID_IFE1_VID, HW_FENCE_IPC_CLIENT_ID_IFE1_PID, 0, false, true, false}, - {HW_FENCE_IPC_CLIENT_ID_IFE2_VID, HW_FENCE_IPC_CLIENT_ID_IFE2_PID, 0, false, true, false}, - {HW_FENCE_IPC_CLIENT_ID_IFE3_VID, HW_FENCE_IPC_CLIENT_ID_IFE3_PID, 0, false, true, false}, - {HW_FENCE_IPC_CLIENT_ID_IFE4_VID, HW_FENCE_IPC_CLIENT_ID_IFE4_PID, 0, false, true, false}, - {HW_FENCE_IPC_CLIENT_ID_IFE5_VID, HW_FENCE_IPC_CLIENT_ID_IFE5_PID, 0, false, true, false}, - {HW_FENCE_IPC_CLIENT_ID_IFE6_VID, HW_FENCE_IPC_CLIENT_ID_IFE6_PID, 0, false, true, false}, - {HW_FENCE_IPC_CLIENT_ID_IFE7_VID, HW_FENCE_IPC_CLIENT_ID_IFE7_PID, 0, false, true, false}, + {HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_PID, 0, true, true, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_PID, 0, true, true, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE0_VID, HW_FENCE_IPC_CLIENT_ID_IFE0_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE1_VID, HW_FENCE_IPC_CLIENT_ID_IFE1_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE2_VID, HW_FENCE_IPC_CLIENT_ID_IFE2_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE3_VID, HW_FENCE_IPC_CLIENT_ID_IFE3_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE4_VID, HW_FENCE_IPC_CLIENT_ID_IFE4_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE5_VID, HW_FENCE_IPC_CLIENT_ID_IFE5_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE6_VID, HW_FENCE_IPC_CLIENT_ID_IFE6_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE7_VID, HW_FENCE_IPC_CLIENT_ID_IFE7_PID, 0, false, false, true, + false}, }; /** @@ -138,47 +185,66 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_v2[HW_FENCE_IPC_MAP_MAX] * HW_FENCE_MAX_STATIC_CLIENTS_INDEX + (client type index - HW_FENCE_MAX_CLIENT_TYPE_STATIC). 
*/ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_sun[HW_FENCE_IPC_MAP_MAX] = { - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 0, true, true, false}, - {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_PID, 0, false, false, true}, - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 0, false, true, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 0, true, true, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_PID, 0, true, false, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 0, false, false, true, false}, /* ctl0 */ - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 1, false, true, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 1, false, false, true, false}, /* ctl1 */ - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 2, false, true, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 2, false, false, true, false}, /* ctl2 */ - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 3, false, true, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 3, false, false, true, false}, /* ctl3 */ - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 4, false, true, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 4, false, false, true, false}, /* ctl4 */ - {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 5, false, true, + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 5, false, false, true, false}, /* ctl5 */ #if IS_ENABLED(CONFIG_DEBUG_FS) - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 21, true, false, true}, - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 22, true, false, true}, - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 23, true, false, true}, - 
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 24, true, false, true}, - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 25, true, false, true}, - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 26, true, false, true}, - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 27, true, false, true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 21, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 22, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 23, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 24, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 25, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 26, true, true, false, + true}, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 27, true, true, false, + true}, #else - {0, 0, 0, false, false, false}, /* val0 */ - {0, 0, 0, false, false, false}, /* val1 */ - {0, 0, 0, false, false, false}, /* val2 */ - {0, 0, 0, false, false, false}, /* val3 */ - {0, 0, 0, false, false, false}, /* val4 */ - {0, 0, 0, false, false, false}, /* val5 */ - {0, 0, 0, false, false, false}, /* val6 */ + {0, 0, 0, false, false, false, false}, /* val0 */ + {0, 0, 0, false, false, false, false}, /* val1 */ + {0, 0, 0, false, false, false, false}, /* val2 */ + {0, 0, 0, false, false, false, false}, /* val3 */ + {0, 0, 0, false, false, false, false}, /* val4 */ + {0, 0, 0, false, false, false, false}, /* val5 */ + {0, 0, 0, false, false, false, false}, /* val6 */ #endif /* CONFIG_DEBUG_FS */ - {HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_PID_SUN, 0, true, true, false}, - {HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_PID, 0, true, true, false}, - 
{HW_FENCE_IPC_CLIENT_ID_IFE0_VID, HW_FENCE_IPC_CLIENT_ID_IFE0_PID, 0, false, true, false}, - {HW_FENCE_IPC_CLIENT_ID_IFE1_VID, HW_FENCE_IPC_CLIENT_ID_IFE1_PID, 0, false, true, false}, - {HW_FENCE_IPC_CLIENT_ID_IFE2_VID, HW_FENCE_IPC_CLIENT_ID_IFE2_PID, 0, false, true, false}, - {HW_FENCE_IPC_CLIENT_ID_IFE3_VID, HW_FENCE_IPC_CLIENT_ID_IFE3_PID, 0, false, true, false}, - {HW_FENCE_IPC_CLIENT_ID_IFE4_VID, HW_FENCE_IPC_CLIENT_ID_IFE4_PID, 0, false, true, false}, - {HW_FENCE_IPC_CLIENT_ID_IFE5_VID, HW_FENCE_IPC_CLIENT_ID_IFE5_PID, 0, false, true, false}, - {HW_FENCE_IPC_CLIENT_ID_IFE6_VID, HW_FENCE_IPC_CLIENT_ID_IFE6_PID, 0, false, true, false}, - {HW_FENCE_IPC_CLIENT_ID_IFE7_VID, HW_FENCE_IPC_CLIENT_ID_IFE7_PID, 0, false, true, false}, + {HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_PID_SUN, 0, true, true, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_PID, 0, true, true, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE0_VID, HW_FENCE_IPC_CLIENT_ID_IFE0_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE1_VID, HW_FENCE_IPC_CLIENT_ID_IFE1_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE2_VID, HW_FENCE_IPC_CLIENT_ID_IFE2_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE3_VID, HW_FENCE_IPC_CLIENT_ID_IFE3_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE4_VID, HW_FENCE_IPC_CLIENT_ID_IFE4_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE5_VID, HW_FENCE_IPC_CLIENT_ID_IFE5_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE6_VID, HW_FENCE_IPC_CLIENT_ID_IFE6_PID, 0, false, false, true, + false}, + {HW_FENCE_IPC_CLIENT_ID_IFE7_VID, HW_FENCE_IPC_CLIENT_ID_IFE7_PID, 0, false, false, true, + false}, }; int hw_fence_ipcc_get_client_virt_id(struct hw_fence_driver_data *drv_data, u32 client_id) @@ -213,6 +279,15 @@ bool hw_fence_ipcc_needs_rxq_update(struct hw_fence_driver_data *drv_data, int c return 
drv_data->ipc_clients_table[client_id].update_rxq; } +bool hw_fence_ipcc_signaled_needs_rxq_update(struct hw_fence_driver_data *drv_data, + int client_id) +{ + if (!drv_data || client_id >= drv_data->clients_num) + return false; + + return drv_data->ipc_clients_table[client_id].signaled_update_rxq; +} + bool hw_fence_ipcc_signaled_needs_ipc_irq(struct hw_fence_driver_data *drv_data, int client_id) { if (!drv_data || client_id >= drv_data->clients_num) diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 89180db90d..572f657c36 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1669,7 +1669,7 @@ struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data, static void _fence_ctl_signal(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash, - u64 flags, u64 client_data, u32 error) + u64 flags, u64 client_data, u32 error, bool signal_from_import) { u32 tx_client_id = drv_data->ipcc_client_pid; /* phys id for tx client */ u32 rx_client_id = hw_fence_client->ipc_client_vid; /* virt id for rx client */ @@ -1682,13 +1682,14 @@ static void _fence_ctl_signal(struct hw_fence_driver_data *drv_data, hash, flags, error); } else { /* Write to Rx queue */ - if (hw_fence_client->update_rxq) + if (hw_fence_client->signaled_update_rxq || + (hw_fence_client->update_rxq && !signal_from_import)) hw_fence_update_queue(drv_data, hw_fence_client, hw_fence->ctx_id, hw_fence->seq_id, hash, flags, client_data, error, HW_FENCE_RX_QUEUE - 1); /* Signal the hw fence now */ - if (hw_fence_client->signaled_send_ipc) + if (hw_fence_client->signaled_send_ipc || !signal_from_import) hw_fence_ipcc_trigger_signal(drv_data, tx_client_id, rx_client_id, hw_fence_client->ipc_signal_id); } @@ -1911,7 +1912,7 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, /* signal the join hw fence */ _fence_ctl_signal(drv_data, 
hw_fence_client, join_fence, *hash_join_fence, 0, - client_data, join_fence->error); + client_data, join_fence->error, true); set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &array->base.flags); /* @@ -1996,7 +1997,7 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, if (fence != NULL) set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &fence->flags); _fence_ctl_signal(drv_data, hw_fence_client, hw_fence, *hash, 0, client_data, - hw_fence->error); + hw_fence->error, true); } return 0; @@ -2048,7 +2049,7 @@ static void _signal_all_wait_clients(struct hw_fence_driver_data *drv_data, client_data = hw_fence->client_data[data_id]; _fence_ctl_signal(drv_data, hw_fence_wait_client, hw_fence, - hash, 0, client_data, error); + hash, 0, client_data, error, false); } } } diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 9ae8f4df25..9d1c828cbc 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -87,6 +87,8 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, } hw_fence_client->update_rxq = hw_fence_ipcc_needs_rxq_update(hw_fence_drv_data, client_id); + hw_fence_client->signaled_update_rxq = + hw_fence_ipcc_signaled_needs_rxq_update(hw_fence_drv_data, client_id); hw_fence_client->signaled_send_ipc = hw_fence_ipcc_signaled_needs_ipc_irq(hw_fence_drv_data, client_id); hw_fence_client->txq_update_send_ipc = @@ -94,10 +96,12 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, hw_fence_client->queues_num = hw_fence_utils_get_queues_num(hw_fence_drv_data, client_id); if (!hw_fence_client->queues_num || (hw_fence_client->update_rxq && - hw_fence_client->queues_num < HW_FENCE_CLIENT_QUEUES)) { - HWFNC_ERR("client:%d invalid q_num:%d for updates_rxq:%s\n", client_id, - hw_fence_client->queues_num, - hw_fence_client->update_rxq ? 
"true" : "false"); + hw_fence_client->queues_num < HW_FENCE_CLIENT_QUEUES) || + (!hw_fence_client->update_rxq && hw_fence_client->signaled_update_rxq)) { + HWFNC_ERR("client:%d invalid q_num:%d for updates_rxq:%s signaled_update_rxq:%s\n", + client_id, hw_fence_client->queues_num, + hw_fence_client->update_rxq ? "true" : "false", + hw_fence_client->signaled_update_rxq ? "true" : "false"); ret = -EINVAL; goto error; } From 816618f370d7dc88bce276991aaebb1800f709cc Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 13 Feb 2024 17:17:28 -0800 Subject: [PATCH 114/166] Revert "mm-drivers: hw_fence: add hw-fence validation client ID" This reverts commit fb7af013034c125791847d58d8dc4061e15817aa. Reason for revert: This change is no longer needed because synx dependencies are now merged and part of vendor. Change-Id: I6b3f0ec6a093717b38cce9429a23d67b0948e859 Signed-off-by: Grace An --- hw_fence/src/msm_hw_fence_synx_translation.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c index 1771b40f36..19904411ab 100644 --- a/hw_fence/src/msm_hw_fence_synx_translation.c +++ b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -20,10 +20,6 @@ #define MAX_SUPPORTED_DPU0 (HW_FENCE_CLIENT_ID_CTL5 - HW_FENCE_CLIENT_ID_CTL0) #define MAX_SUPPORTED_TEST (HW_FENCE_CLIENT_ID_VAL6 - HW_FENCE_CLIENT_ID_VAL1) -#ifndef SYNX_CLIENT_HW_FENCE_TEST_CTX0 -#define SYNX_CLIENT_HW_FENCE_TEST_CTX0 2368 -#endif - static enum hw_fence_client_id _get_hw_fence_client_id(enum synx_client_id synx_client_id) { enum hw_fence_client_id hw_fence_client_id; From 12ab3145bd0702e28074df4c8a89164bc414d490 Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 21 Feb 2024 14:33:16 -0800 Subject: [PATCH 115/166] mm-drivers: hw_fence: convert signal status for synx_wait Currently, synx_wait does not convert synx signal statuses for all errors. Update function call to perform this conversion. 
Change-Id: I23366b4bd85c8288e1917cce5982d2f0796f9688 Signed-off-by: Grace An --- hw_fence/src/msm_hw_fence_synx_translation.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c index 1771b40f36..91ce2f5a0d 100644 --- a/hw_fence/src/msm_hw_fence_synx_translation.c +++ b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -259,7 +259,7 @@ static int synx_hwfence_wait(struct synx_session *session, u32 h_synx, u64 timeo return hw_fence_interop_to_synx_status(ret); } - return error ? error : SYNX_STATE_SIGNALED_SUCCESS; + return hw_fence_interop_to_synx_signal_status(MSM_HW_FENCE_FLAG_SIGNAL, error); } int synx_hwfence_recover(enum synx_client_id id) From d0d102d3cfe39c0c141673312a21755d91f556fe Mon Sep 17 00:00:00 2001 From: Grace An Date: Fri, 9 Feb 2024 16:05:09 -0800 Subject: [PATCH 116/166] mm-drivers: hw_fence: move workload from isr context to worker thread Currently, processing of signals that APSS receives from SOCCP is entirely done in ISR context. Move the queue read/write operations and signaling of validation clients to worker thread to avoid locking mutexes with interrupts disabled. 
Change-Id: Ib36448c9c0a856f9ab210e14853255401b99876e Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 6 +++++ hw_fence/src/hw_fence_drv_utils.c | 38 +++++++++++++++++++++++++--- hw_fence/src/msm_hw_fence.c | 6 +++++ 3 files changed, 47 insertions(+), 3 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 9f19937313..01e48a3626 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -389,6 +389,9 @@ struct hw_fence_signal_cb { * @dma_fence_table: table with internal dma-fences for hw-fences * @soccp_rproc: soccp rproc object used to set power vote * @has_soccp: flag to indicate if soccp is present (otherwise vm is used) + * @soccp_listener_thread: thread that processes interrupts received from soccp + * @soccp_wait_queue: wait queue to notify soccp_listener_thread of new interrupts + * @signaled_clients_mask: mask to track signals received from soccp by hw-fence driver */ struct hw_fence_driver_data { @@ -484,6 +487,9 @@ struct hw_fence_driver_data { /* soccp is present */ struct rproc *soccp_rproc; bool has_soccp; + struct task_struct *soccp_listener_thread; + wait_queue_head_t soccp_wait_queue; + atomic_t signaled_clients_mask; }; /** diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 9514cdc853..ed3902660f 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -19,6 +19,8 @@ #include #endif #include +#include +#include #include "hw_fence_drv_priv.h" #include "hw_fence_drv_utils.h" @@ -457,17 +459,35 @@ int hw_fence_utils_init_virq(struct hw_fence_driver_data *drv_data) static irqreturn_t hw_fence_soccp_irq_handler(int irq, void *data) { struct hw_fence_driver_data *drv_data = (struct hw_fence_driver_data *)data; - u64 mask; + u32 mask; mask = hw_fence_ipcc_get_signaled_clients_mask(drv_data); - hw_fence_utils_process_signaled_clients_mask(drv_data, mask); + atomic_or(mask, 
&drv_data->signaled_clients_mask); + wake_up_all(&drv_data->soccp_wait_queue); return IRQ_HANDLED; } +static int hw_fence_soccp_listener(void *data) +{ + struct hw_fence_driver_data *drv_data = (struct hw_fence_driver_data *)data; + u32 mask; + + while (drv_data->has_soccp) { + wait_event(drv_data->soccp_wait_queue, + atomic_read(&drv_data->signaled_clients_mask) != 0); + mask = atomic_xchg(&drv_data->signaled_clients_mask, 0); + if (mask) + hw_fence_utils_process_signaled_clients_mask(drv_data, mask); + } + + return 0; +} + int hw_fence_utils_init_soccp_irq(struct hw_fence_driver_data *drv_data) { struct platform_device *pdev; + struct task_struct *thread; int irq, ret; if (!drv_data || !drv_data->dev || !drv_data->has_soccp) { @@ -476,6 +496,8 @@ int hw_fence_utils_init_soccp_irq(struct hw_fence_driver_data *drv_data) return -EINVAL; } + init_waitqueue_head(&drv_data->soccp_wait_queue); + pdev = to_platform_device(drv_data->dev); irq = platform_get_irq(pdev, 0); if (irq < 0) { @@ -486,8 +508,18 @@ int hw_fence_utils_init_soccp_irq(struct hw_fence_driver_data *drv_data) ret = devm_request_irq(drv_data->dev, irq, hw_fence_soccp_irq_handler, IRQF_TRIGGER_HIGH, "hwfence-driver", drv_data); - if (ret < 0) + if (ret < 0) { HWFNC_ERR("failed to register irq:%d ret:%d\n", irq, ret); + return ret; + } + + thread = kthread_run(hw_fence_soccp_listener, (void *)drv_data, + "msm_hw_fence_soccp_listener"); + if (IS_ERR(thread)) { + HWFNC_ERR("failed to create thread to process signals received from soccp\n"); + return PTR_ERR(thread); + } + drv_data->soccp_listener_thread = thread; return ret; } diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index cc6cf7d6a3..7444b9d7d7 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -12,6 +12,7 @@ #if (KERNEL_VERSION(6, 5, 0) <= LINUX_VERSION_CODE) #include #endif +#include #include "hw_fence_drv_priv.h" #include "hw_fence_drv_utils.h" @@ -864,6 +865,11 @@ static int 
msm_hw_fence_remove(struct platform_device *pdev) return -EINVAL; } + /* indicate listener thread should stop listening for interrupts from soccp */ + hw_fence_drv_data->has_soccp = false; + if (hw_fence_drv_data->soccp_listener_thread) + kthread_stop(hw_fence_drv_data->soccp_listener_thread); + dev_set_drvdata(&pdev->dev, NULL); kfree(hw_fence_drv_data); hw_fence_drv_data = (void *) -EPROBE_DEFER; From 5e169d535988fb168318dfc3ce03db46a65cbf0a Mon Sep 17 00:00:00 2001 From: Grace An Date: Thu, 29 Feb 2024 17:07:34 -0800 Subject: [PATCH 117/166] mm-drivers: hw_fence: change gfp flag to GFP_ATOMIC in synx_create Currently, kzalloc call with GFP_KERNEL flag prevents synx_create from being called in atomic context. Change flag to GFP_ATOMIC to allow for this call in this context. Change-Id: Iadda94facdaac76fd4614526f2808f0dac1fed09 Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_priv.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index cd936ee2b2..010bcc4e14 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1320,11 +1320,11 @@ struct dma_fence *hw_dma_fence_init(struct msm_hw_fence_client *hw_fence_client, spinlock_t *fence_lock; /* create dma fence */ - fence_lock = kzalloc(sizeof(*fence_lock), GFP_KERNEL); + fence_lock = kzalloc(sizeof(*fence_lock), GFP_ATOMIC); if (!fence_lock) return ERR_PTR(-ENOMEM); - fence = kzalloc(sizeof(*fence), GFP_KERNEL); + fence = kzalloc(sizeof(*fence), GFP_ATOMIC); if (!fence) { kfree(fence_lock); return ERR_PTR(-ENOMEM); @@ -2393,7 +2393,7 @@ int hw_fence_add_callback(struct hw_fence_driver_data *drv_data, struct dma_fenc return -EINVAL; } - signal_cb = kzalloc(sizeof(*signal_cb), GFP_KERNEL); + signal_cb = kzalloc(sizeof(*signal_cb), GFP_ATOMIC); if (!signal_cb) return -ENOMEM; From d9b4137458bc56ba7fe6eec3ee8e4db25bc56077 Mon Sep 17 00:00:00 2001 From: Grace An Date: Mon, 4 Mar 2024 15:56:23 
-0800 Subject: [PATCH 118/166] mm-drivers: hw_fence: return -EPROBE_DEFER if soccp is not yet up The symbols dependency between kernel driver and hw-fence driver is insufficient to ensure that HW Fence Driver probes after SoCCP driver is up because the same driver that manages soccp also manages other devices. When SoCCP is not up yet, HW Fence Driver fails to get the soccp remoteproc object. Instead of returning -EINVAL, return -EPROBE_DEFER to ensure that HW Fence Driver probe is delayed until after SoCCP is ready. Also, appropriately reduce errors to debug info for this path. Change-Id: I89c11bb7431e7abe73a9b4facf1be7a2b68e4ad0 Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_priv.c | 2 +- hw_fence/src/hw_fence_drv_utils.c | 4 ++-- hw_fence/src/msm_hw_fence.c | 3 ++- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 010bcc4e14..9d0a3feb5d 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -694,7 +694,7 @@ int hw_fence_init(struct hw_fence_driver_data *drv_data) ret = hw_fence_utils_parse_dt_props(drv_data); if (ret) { - HWFNC_ERR("failed to set dt properties\n"); + HWFNC_DBG_INFO("failed to set dt properties\n"); goto exit; } diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index ed3902660f..61aae873f6 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -1135,8 +1135,8 @@ int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data) drv_data->has_soccp = true; drv_data->soccp_rproc = rproc_get_by_phandle(ph); if (IS_ERR_OR_NULL(drv_data->soccp_rproc)) { - HWFNC_ERR("failed to find rproc for phandle:%u\n", ph); - return -EINVAL; + HWFNC_DBG_INFO("failed to find rproc for phandle:%u\n", ph); + return -EPROBE_DEFER; } } diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 7444b9d7d7..1046c6449e 100644 --- a/hw_fence/src/msm_hw_fence.c 
+++ b/hw_fence/src/msm_hw_fence.c @@ -821,7 +821,8 @@ error: kfree(hw_fence_drv_data); hw_fence_drv_data = (void *) -EPROBE_DEFER; - HWFNC_ERR_ONCE("error %d\n", rc); + HWFNC_DBG_INFO("error %d\n", rc); + return rc; } From 8bc1ba8f7f693f10e97e1488c4e9dfafb4ffe21b Mon Sep 17 00:00:00 2001 From: Grace An Date: Mon, 4 Mar 2024 13:26:08 -0800 Subject: [PATCH 119/166] mm-drivers: hw_fence: signal val clients with ipcc on targets with soccp On targets without soccp, ipcc signaling cannot be used to signal validation clients because vm has registered interrupt for apps. However, on targets with soccp, ipcc signaling can be used. Update signaling for validation clients such that ipcc signaling is used instead of directly processing the validation client signal when hw-fence is signaled. This ensures that signaling pathway is more similar between validation and real clients and also avoids invalid mutex locking inside atomic context. Change-Id: I5382ef661205fb4394a01412dfccf105a292fbb0 Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_ipc.c | 28 +++++++++++++++++++--------- hw_fence/src/hw_fence_drv_priv.c | 15 +++++++++------ 2 files changed, 28 insertions(+), 15 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_ipc.c b/hw_fence/src/hw_fence_drv_ipc.c index 045cf7b67a..25e68869b8 100644 --- a/hw_fence/src/hw_fence_drv_ipc.c +++ b/hw_fence/src/hw_fence_drv_ipc.c @@ -202,19 +202,19 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_sun[HW_FENCE_IPC_MAP_MAX {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 5, false, false, true, false}, /* ctl5 */ #if IS_ENABLED(CONFIG_DEBUG_FS) - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 21, true, true, false, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 21, true, true, true, true}, - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 22, true, true, false, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 22, true, true, 
true, true}, - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 23, true, true, false, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 23, true, true, true, true}, - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 24, true, true, false, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 24, true, true, true, true}, - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 25, true, true, false, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 25, true, true, true, true}, - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 26, true, true, false, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 26, true, true, true, true}, - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 27, true, true, false, + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 27, true, true, true, true}, #else {0, 0, 0, false, false, false, false}, /* val0 */ @@ -605,7 +605,7 @@ int hw_fence_ipcc_enable_client_signal_pairs(struct hw_fence_driver_data *drv_da hw_fence_client->ipc_signal_id, drv_data->has_soccp); /* Enable input signal from driver to client */ - if (ipc_client_vid != drv_data->ipcc_client_vid) + if (drv_data->has_soccp || ipc_client_vid != drv_data->ipcc_client_vid) _enable_client_signal_pair(drv_data, hw_fence_client->ipc_client_id_phys, drv_data->ipcc_client_vid, hw_fence_client->ipc_signal_id); @@ -621,6 +621,15 @@ int hw_fence_ipcc_enable_client_signal_pairs(struct hw_fence_driver_data *drv_da return 0; } +static bool _is_invalid_signaling_client(struct hw_fence_driver_data *drv_data, u32 client_id) +{ +#if IS_ENABLED(CONFIG_DEBUG_FS) + return client_id != drv_data->ipcc_fctl_vid && client_id != drv_data->ipcc_client_vid; +#else + return client_id != drv_data->ipcc_fctl_vid; +#endif +} + u64 hw_fence_ipcc_get_signaled_clients_mask(struct hw_fence_driver_data *drv_data) { u32 client_id, signal_id, 
reg_val; @@ -651,7 +660,7 @@ u64 hw_fence_ipcc_get_signaled_clients_mask(struct hw_fence_driver_data *drv_dat HWFNC_DBG_IRQ("read recv_id value:0x%x client:%u signal:%u\n", reg_val, client_id, signal_id); - if (client_id != drv_data->ipcc_fctl_vid) { + if (_is_invalid_signaling_client(drv_data, client_id)) { HWFNC_ERR("Received client:%u signal:%u expected client:%u\n", client_id, signal_id, drv_data->ipcc_fctl_vid); continue; @@ -665,6 +674,7 @@ u64 hw_fence_ipcc_get_signaled_clients_mask(struct hw_fence_driver_data *drv_dat signal_id = signal_id - hw_fence_ipcc_get_signal_id(drv_data, HW_FENCE_CLIENT_ID_VAL0) + HW_FENCE_CLIENT_ID_VAL0; #endif /* CONFIG_DEBUG_FS*/ + mask |= BIT(signal_id); } diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 010bcc4e14..e8278461c6 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1713,17 +1713,20 @@ static void _fence_ctl_signal(struct hw_fence_driver_data *drv_data, hw_fence->seq_id, hash, flags, client_data, error, HW_FENCE_RX_QUEUE - 1); +#if IS_ENABLED(CONFIG_DEBUG_FS) + /* signal validation clients on targets with vm through custom mechanism */ + if (!drv_data->has_soccp && hw_fence_client->client_id >= HW_FENCE_CLIENT_ID_VAL0 && + hw_fence_client->client_id <= HW_FENCE_CLIENT_ID_VAL6) { + process_validation_client_loopback(drv_data, hw_fence_client->client_id); + return; + } +#endif /* CONFIG_DEBUG_FS */ + /* Signal the hw fence now */ if (hw_fence_client->signaled_send_ipc || !signal_from_import) hw_fence_ipcc_trigger_signal(drv_data, tx_client_id, rx_client_id, hw_fence_client->ipc_signal_id); } - -#if IS_ENABLED(CONFIG_DEBUG_FS) - if (hw_fence_client->client_id >= HW_FENCE_CLIENT_ID_VAL0 - && hw_fence_client->client_id <= HW_FENCE_CLIENT_ID_VAL6) - process_validation_client_loopback(drv_data, hw_fence_client->client_id); -#endif /* CONFIG_DEBUG_FS */ } static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data, From 
294cbf861d7f45a6012a4da09fa68495c1f7838a Mon Sep 17 00:00:00 2001 From: Grace An Date: Fri, 9 Feb 2024 16:52:24 -0800 Subject: [PATCH 120/166] mm-drivers: hw_fence: add ipcc support for niobe target Add ipcc version information and ipcc physical client and virtual ids for hw-fence clients on niobe. Change-Id: I4c2ef71584d80c6766e53aa6df49282cf4bf9f9e Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_ipc.h | 24 +++++++ hw_fence/include/hw_fence_drv_utils.h | 2 +- hw_fence/include/msm_hw_fence.h | 6 +- hw_fence/src/hw_fence_drv_ipc.c | 96 +++++++++++++++++++++++++++ hw_fence/src/hw_fence_drv_priv.c | 2 +- hw_fence/src/hw_fence_drv_utils.c | 30 ++++++++- 6 files changed, 155 insertions(+), 5 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_ipc.h b/hw_fence/include/hw_fence_drv_ipc.h index 318c25e6a5..ff4cbc25f5 100644 --- a/hw_fence/include/hw_fence_drv_ipc.h +++ b/hw_fence/include/hw_fence_drv_ipc.h @@ -21,6 +21,10 @@ #define HW_FENCE_IPC_CLIENT_ID_IFE5_VID 133 #define HW_FENCE_IPC_CLIENT_ID_IFE6_VID 134 #define HW_FENCE_IPC_CLIENT_ID_IFE7_VID 135 +#define HW_FENCE_IPC_CLIENT_ID_IFE8_VID 136 +#define HW_FENCE_IPC_CLIENT_ID_IFE9_VID 137 +#define HW_FENCE_IPC_CLIENT_ID_IFE10_VID 138 +#define HW_FENCE_IPC_CLIENT_ID_IFE11_VID 139 /* ipc clients physical client-id */ #define HW_FENCE_IPC_CLIENT_ID_APPS_PID 3 @@ -41,15 +45,35 @@ /* ipc clients physical client-id on other targets */ #define HW_FENCE_IPC_CLIENT_ID_IPE_PID_SUN 9 #define HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN 20 +#define HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE 2 +#define HW_FENCE_IPC_CLIENT_ID_DPU_PID_NIOBE 3 +#define HW_FENCE_IPC_CLIENT_ID_GPU_PID_NIOBE 8 +#define HW_FENCE_IPC_CLIENT_ID_IPE_PID_NIOBE 10 +#define HW_FENCE_IPC_CLIENT_ID_VPU_PID_NIOBE 11 +#define HW_FENCE_IPC_CLIENT_ID_SOCCP_PID_NIOBE 13 +#define HW_FENCE_IPC_CLIENT_ID_IFE0_PID_NIOBE 15 +#define HW_FENCE_IPC_CLIENT_ID_IFE1_PID_NIOBE 16 +#define HW_FENCE_IPC_CLIENT_ID_IFE2_PID_NIOBE 17 +#define HW_FENCE_IPC_CLIENT_ID_IFE3_PID_NIOBE 18 
+#define HW_FENCE_IPC_CLIENT_ID_IFE4_PID_NIOBE 19 +#define HW_FENCE_IPC_CLIENT_ID_IFE5_PID_NIOBE 20 +#define HW_FENCE_IPC_CLIENT_ID_IFE6_PID_NIOBE 21 +#define HW_FENCE_IPC_CLIENT_ID_IFE7_PID_NIOBE 22 +#define HW_FENCE_IPC_CLIENT_ID_IFE8_PID_NIOBE 23 +#define HW_FENCE_IPC_CLIENT_ID_IFE9_PID_NIOBE 24 +#define HW_FENCE_IPC_CLIENT_ID_IFE10_PID_NIOBE 25 +#define HW_FENCE_IPC_CLIENT_ID_IFE11_PID_NIOBE 26 #define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_KALAMA 2 #define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_PINEAPPLE 2 #define HW_FENCE_IPC_FENCE_PROTOCOL_ID_PINEAPPLE 4 #define HW_FENCE_IPC_FENCE_PROTOCOL_ID_SUN 4 +#define HW_FENCE_IPC_FENCE_PROTOCOL_ID_NIOBE 4 #define HW_FENCE_IPCC_HW_REV_170 0x00010700 /* Kalama */ #define HW_FENCE_IPCC_HW_REV_203 0x00020003 /* Pineapple */ #define HW_FENCE_IPCC_HW_REV_2A2 0x00020A02 /* Sun */ +#define HW_FENCE_IPCC_HW_REV_2B4 0x00020B04 /* Niobe */ #define IPC_PROTOCOLp_CLIENTc_VERSION(base, p, c) (base + (0x40000*p) + (0x1000*c)) #define IPC_PROTOCOLp_CLIENTc_CONFIG(base, p, c) (base + 0x8 + (0x40000*p) + (0x1000*c)) diff --git a/hw_fence/include/hw_fence_drv_utils.h b/hw_fence/include/hw_fence_drv_utils.h index a6e1721658..b1fdc45c63 100644 --- a/hw_fence/include/hw_fence_drv_utils.h +++ b/hw_fence/include/hw_fence_drv_utils.h @@ -16,7 +16,7 @@ * HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE: * Maximum number of client types with configurable number of sub-clients (e.g. 
IPE, VPU, IFE) */ -#define HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE 10 +#define HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE 14 /** * HW_FENCE_MAX_STATIC_CLIENTS_INDEX: diff --git a/hw_fence/include/msm_hw_fence.h b/hw_fence/include/msm_hw_fence.h index a3e096b5fa..5feb4c3e46 100644 --- a/hw_fence/include/msm_hw_fence.h +++ b/hw_fence/include/msm_hw_fence.h @@ -291,7 +291,11 @@ enum hw_fence_client_id { HW_FENCE_CLIENT_ID_IFE5 = HW_FENCE_CLIENT_ID_IFE4 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, HW_FENCE_CLIENT_ID_IFE6 = HW_FENCE_CLIENT_ID_IFE5 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, HW_FENCE_CLIENT_ID_IFE7 = HW_FENCE_CLIENT_ID_IFE6 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, - HW_FENCE_CLIENT_MAX = HW_FENCE_CLIENT_ID_IFE7 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT + HW_FENCE_CLIENT_ID_IFE8 = HW_FENCE_CLIENT_ID_IFE7 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE9 = HW_FENCE_CLIENT_ID_IFE8 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE10 = HW_FENCE_CLIENT_ID_IFE9 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE11 = HW_FENCE_CLIENT_ID_IFE10 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_MAX = HW_FENCE_CLIENT_ID_IFE11 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT }; #if IS_ENABLED(CONFIG_QTI_HW_FENCE) diff --git a/hw_fence/src/hw_fence_drv_ipc.c b/hw_fence/src/hw_fence_drv_ipc.c index 25e68869b8..cba3806af0 100644 --- a/hw_fence/src/hw_fence_drv_ipc.c +++ b/hw_fence/src/hw_fence_drv_ipc.c @@ -247,6 +247,89 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_sun[HW_FENCE_IPC_MAP_MAX false}, }; +/** + * struct hw_fence_clients_ipc_map_niobe - Table makes the 'client to signal' mapping, which is + * used by the hw fence driver to trigger ipc signal when hw fence is already + * signaled. + * This version is for niobe target. + * + * Note that the index of this struct must match the enum hw_fence_client_id for clients ids less + * than HW_FENCE_MAX_STATIC_CLIENTS_INDEX. 
+ * For clients with configurable sub-clients, the index of this struct matches + * HW_FENCE_MAX_STATIC_CLIENTS_INDEX + (client type index - HW_FENCE_MAX_CLIENT_TYPE_STATIC). + */ +struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_niobe[HW_FENCE_IPC_MAP_MAX] = { + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE, 0, true, true, + true, false}, /* ctrlq */ + {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_PID_NIOBE, 0, true, false, + false, true}, /* gfx */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_NIOBE, 0, false, false, + true, false}, /* ctl0 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_NIOBE, 1, false, false, + true, false}, /* ctl1 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_NIOBE, 2, false, false, + true, false}, /* ctl2 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_NIOBE, 3, false, false, + true, false}, /* ctl3 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_NIOBE, 4, false, false, + true, false}, /* ctl4 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_NIOBE, 5, false, false, + true, false}, /* ctl5 */ +#if IS_ENABLED(CONFIG_DEBUG_FS) + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE, 21, true, true, + true, true}, /* val0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE, 22, true, true, + true, true}, /* val1 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE, 23, true, true, + true, true}, /* val2 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE, 24, true, true, + true, true}, /* val3 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE, 25, true, true, + true, true}, /* val4 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE, 26, true, true, + true, true}, /* val5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, 
HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE, 27, true, true, + true, true}, /* val6 */ +#else + {0, 0, 0, false, false, false, false}, /* val0 */ + {0, 0, 0, false, false, false, false}, /* val1 */ + {0, 0, 0, false, false, false, false}, /* val2 */ + {0, 0, 0, false, false, false, false}, /* val3 */ + {0, 0, 0, false, false, false, false}, /* val4 */ + {0, 0, 0, false, false, false, false}, /* val5 */ + {0, 0, 0, false, false, false, false}, /* val6 */ +#endif /* CONFIG_DEBUG_FS */ + {HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_PID_NIOBE, 0, true, true, true, + false}, /* ipe */ + {HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_PID_NIOBE, 0, true, true, true, + false}, /* vpu */ + {HW_FENCE_IPC_CLIENT_ID_IFE0_VID, HW_FENCE_IPC_CLIENT_ID_IFE0_PID_NIOBE, 0, false, false, + true, false}, /* ife0 */ + {HW_FENCE_IPC_CLIENT_ID_IFE1_VID, HW_FENCE_IPC_CLIENT_ID_IFE1_PID_NIOBE, 0, false, false, + true, false}, /* ife1 */ + {HW_FENCE_IPC_CLIENT_ID_IFE2_VID, HW_FENCE_IPC_CLIENT_ID_IFE2_PID_NIOBE, 0, false, false, + true, false}, /* ife2 */ + {HW_FENCE_IPC_CLIENT_ID_IFE3_VID, HW_FENCE_IPC_CLIENT_ID_IFE3_PID_NIOBE, 0, false, false, + true, false}, /* ife3 */ + {HW_FENCE_IPC_CLIENT_ID_IFE4_VID, HW_FENCE_IPC_CLIENT_ID_IFE4_PID_NIOBE, 0, false, false, + true, false}, /* ife4 */ + {HW_FENCE_IPC_CLIENT_ID_IFE5_VID, HW_FENCE_IPC_CLIENT_ID_IFE5_PID_NIOBE, 0, false, false, + true, false}, /* ife5 */ + {HW_FENCE_IPC_CLIENT_ID_IFE6_VID, HW_FENCE_IPC_CLIENT_ID_IFE6_PID_NIOBE, 0, false, false, + true, false}, /* ife6 */ + {HW_FENCE_IPC_CLIENT_ID_IFE7_VID, HW_FENCE_IPC_CLIENT_ID_IFE7_PID_NIOBE, 0, false, false, + true, false}, /* ife7 */ + {HW_FENCE_IPC_CLIENT_ID_IFE8_VID, HW_FENCE_IPC_CLIENT_ID_IFE8_PID_NIOBE, 0, false, false, + true, false}, /* ife8 */ + {HW_FENCE_IPC_CLIENT_ID_IFE9_VID, HW_FENCE_IPC_CLIENT_ID_IFE9_PID_NIOBE, 0, false, false, + true, false}, /* ife9 */ + {HW_FENCE_IPC_CLIENT_ID_IFE10_VID, HW_FENCE_IPC_CLIENT_ID_IFE10_PID_NIOBE, 0, false, false, + 
true, false}, /* ife10 */ + {HW_FENCE_IPC_CLIENT_ID_IFE11_VID, HW_FENCE_IPC_CLIENT_ID_IFE11_PID_NIOBE, 0, false, false, + true, false}, /* ife11 */ + +}; + int hw_fence_ipcc_get_client_virt_id(struct hw_fence_driver_data *drv_data, u32 client_id) { if (!drv_data || client_id >= drv_data->clients_num) @@ -482,6 +565,19 @@ static int _hw_fence_ipcc_hwrev_init(struct hw_fence_driver_data *drv_data, u32 hw_fence_clients_ipc_map_sun); HWFNC_DBG_INIT("ipcc protocol_id: Sun\n"); break; + case HW_FENCE_IPCC_HW_REV_2B4: + drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; + drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE; + drv_data->ipcc_fctl_vid = drv_data->has_soccp ? HW_FENCE_IPC_CLIENT_ID_SOCCP_VID : + HW_FENCE_IPC_CLIENT_ID_APPS_VID; + drv_data->ipcc_fctl_pid = drv_data->has_soccp ? + HW_FENCE_IPC_CLIENT_ID_SOCCP_PID_NIOBE : + HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE; + drv_data->protocol_id = HW_FENCE_IPC_FENCE_PROTOCOL_ID_NIOBE; /* Fence */ + ret = _hw_fence_ipcc_init_map_with_configurable_clients(drv_data, + hw_fence_clients_ipc_map_niobe); + HWFNC_DBG_INIT("ipcc protocol_id: Niobe\n"); + break; default: HWFNC_ERR("unrecognized ipcc hw-rev:0x%x\n", hwrev); return -1; diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index e8278461c6..b931ed6d53 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -872,7 +872,7 @@ int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - 1: /* nothing to initialize for VPU client */ break; - case HW_FENCE_CLIENT_ID_IFE0 ... HW_FENCE_CLIENT_ID_IFE7 + + case HW_FENCE_CLIENT_ID_IFE0 ... 
HW_FENCE_CLIENT_ID_IFE11 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - 1: /* nothing to initialize for IFE clients */ break; diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index ed3902660f..22991ded51 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -96,6 +96,24 @@ */ #define HW_FENCE_MAX_EVENTS 1000 +/** + * DT_PROPS_CLIENT_NAME_SIZE: + * Maximum number of characters in client name used in device-tree properties + */ +#define DT_PROPS_CLIENT_NAME_SIZE 10 + +/** + * DT_PROPS_CLIENT_PROPS_SIZE: + * Maximum number of characters in property name for base client queue properties. + */ +#define DT_PROPS_CLIENT_PROPS_SIZE (DT_PROPS_CLIENT_NAME_SIZE + 27) + +/** + * DT_PROPS_CLIENT_EXTRA_PROPS_SIZE: + * Maximum number of characters in property name for extra client queue properties. + */ +#define DT_PROPS_CLIENT_EXTRA_PROPS_SIZE (DT_PROPS_CLIENT_NAME_SIZE + 33) + /** * struct hw_fence_client_types - Table describing all supported client types, used to parse * device-tree properties related to client queue size. 
@@ -142,6 +160,14 @@ struct hw_fence_client_type_desc hw_fence_client_types[HW_FENCE_MAX_CLIENT_TYPE] true}, {"ife7", HW_FENCE_CLIENT_ID_IFE7, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, true}, + {"ife8", HW_FENCE_CLIENT_ID_IFE8, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true}, + {"ife9", HW_FENCE_CLIENT_ID_IFE9, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true}, + {"ife10", HW_FENCE_CLIENT_ID_IFE10, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true}, + {"ife11", HW_FENCE_CLIENT_ID_IFE11, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true}, }; static void _lock_vm(uint64_t *wait) @@ -904,7 +930,7 @@ static int _parse_client_queue_dt_props_extra(struct hw_fence_driver_data *drv_d struct hw_fence_client_type_desc *desc) { u32 max_idx_from_zero, payload_size_u32 = HW_FENCE_CLIENT_QUEUE_PAYLOAD / sizeof(u32); - char name[40]; + char name[DT_PROPS_CLIENT_EXTRA_PROPS_SIZE]; u32 tmp[4]; bool idx_by_payload = false; int count, ret; @@ -990,7 +1016,7 @@ exit: static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_data, struct hw_fence_client_type_desc *desc) { - char name[31]; + char name[DT_PROPS_CLIENT_PROPS_SIZE]; u32 tmp[4]; u32 queue_size; int ret; From 547ae8ecfe4d70647ea007f62acaf5f79c97a17b Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 14 Feb 2024 10:10:28 -0800 Subject: [PATCH 121/166] mm-drivers: hw_fence: add support for ipa client queue This change adds support for IPA client, and setup the driver to allocate its TxQ once DT entries are present. 
Change-Id: I872125b3fc3d8e2f2d0ac596f07c7a37aee023bc Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_ipc.h | 2 ++ hw_fence/include/hw_fence_drv_utils.h | 4 ++-- hw_fence/include/msm_hw_fence.h | 3 ++- hw_fence/src/hw_fence_drv_ipc.c | 4 ++++ hw_fence/src/hw_fence_drv_priv.c | 4 ++++ hw_fence/src/hw_fence_drv_utils.c | 3 +++ hw_fence/src/msm_hw_fence_synx_translation.c | 5 +++++ 7 files changed, 22 insertions(+), 3 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_ipc.h b/hw_fence/include/hw_fence_drv_ipc.h index ff4cbc25f5..f31135d1bb 100644 --- a/hw_fence/include/hw_fence_drv_ipc.h +++ b/hw_fence/include/hw_fence_drv_ipc.h @@ -12,6 +12,7 @@ #define HW_FENCE_IPC_CLIENT_ID_IPE_VID 11 #define HW_FENCE_IPC_CLIENT_ID_VPU_VID 12 #define HW_FENCE_IPC_CLIENT_ID_DPU_VID 25 +#define HW_FENCE_IPC_CLIENT_ID_IPA_VID 26 #define HW_FENCE_IPC_CLIENT_ID_SOCCP_VID 46 #define HW_FENCE_IPC_CLIENT_ID_IFE0_VID 128 #define HW_FENCE_IPC_CLIENT_ID_IFE1_VID 129 @@ -47,6 +48,7 @@ #define HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN 20 #define HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE 2 #define HW_FENCE_IPC_CLIENT_ID_DPU_PID_NIOBE 3 +#define HW_FENCE_IPC_CLIENT_ID_IPA_PID_NIOBE 4 #define HW_FENCE_IPC_CLIENT_ID_GPU_PID_NIOBE 8 #define HW_FENCE_IPC_CLIENT_ID_IPE_PID_NIOBE 10 #define HW_FENCE_IPC_CLIENT_ID_VPU_PID_NIOBE 11 diff --git a/hw_fence/include/hw_fence_drv_utils.h b/hw_fence/include/hw_fence_drv_utils.h index b1fdc45c63..d63bfefbe2 100644 --- a/hw_fence/include/hw_fence_drv_utils.h +++ b/hw_fence/include/hw_fence_drv_utils.h @@ -14,9 +14,9 @@ /** * HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE: - * Maximum number of client types with configurable number of sub-clients (e.g. IPE, VPU, IFE) + * Maximum number of client types with configurable number of sub-clients (e.g. 
IPE, VPU, IFE, IPA) */ -#define HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE 14 +#define HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE 15 /** * HW_FENCE_MAX_STATIC_CLIENTS_INDEX: diff --git a/hw_fence/include/msm_hw_fence.h b/hw_fence/include/msm_hw_fence.h index 5feb4c3e46..e39e1b1fdb 100644 --- a/hw_fence/include/msm_hw_fence.h +++ b/hw_fence/include/msm_hw_fence.h @@ -283,7 +283,8 @@ enum hw_fence_client_id { HW_FENCE_CLIENT_ID_VAL6, HW_FENCE_CLIENT_ID_IPE, HW_FENCE_CLIENT_ID_VPU = HW_FENCE_CLIENT_ID_IPE + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, - HW_FENCE_CLIENT_ID_IFE0 = HW_FENCE_CLIENT_ID_VPU + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IPA = HW_FENCE_CLIENT_ID_VPU + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, + HW_FENCE_CLIENT_ID_IFE0 = HW_FENCE_CLIENT_ID_IPA + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, HW_FENCE_CLIENT_ID_IFE1 = HW_FENCE_CLIENT_ID_IFE0 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, HW_FENCE_CLIENT_ID_IFE2 = HW_FENCE_CLIENT_ID_IFE1 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, HW_FENCE_CLIENT_ID_IFE3 = HW_FENCE_CLIENT_ID_IFE2 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT, diff --git a/hw_fence/src/hw_fence_drv_ipc.c b/hw_fence/src/hw_fence_drv_ipc.c index cba3806af0..6ebe4ca010 100644 --- a/hw_fence/src/hw_fence_drv_ipc.c +++ b/hw_fence/src/hw_fence_drv_ipc.c @@ -155,6 +155,7 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_v2[HW_FENCE_IPC_MAP_MAX] false}, {HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_PID, 0, true, true, true, false}, + {0, 0, 0, false, false, false, false}, /* ipa */ {HW_FENCE_IPC_CLIENT_ID_IFE0_VID, HW_FENCE_IPC_CLIENT_ID_IFE0_PID, 0, false, false, true, false}, {HW_FENCE_IPC_CLIENT_ID_IFE1_VID, HW_FENCE_IPC_CLIENT_ID_IFE1_PID, 0, false, false, true, @@ -229,6 +230,7 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_sun[HW_FENCE_IPC_MAP_MAX false}, {HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_PID, 0, true, true, true, false}, + {0, 0, 0, false, false, false, false}, /* ipa */ {HW_FENCE_IPC_CLIENT_ID_IFE0_VID, 
HW_FENCE_IPC_CLIENT_ID_IFE0_PID, 0, false, false, true, false}, {HW_FENCE_IPC_CLIENT_ID_IFE1_VID, HW_FENCE_IPC_CLIENT_ID_IFE1_PID, 0, false, false, true, @@ -303,6 +305,8 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_niobe[HW_FENCE_IPC_MAP_M false}, /* ipe */ {HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_PID_NIOBE, 0, true, true, true, false}, /* vpu */ + {HW_FENCE_IPC_CLIENT_ID_IPA_VID, HW_FENCE_IPC_CLIENT_ID_IPA_PID_NIOBE, 0, true, true, true, + false}, /* ipa */ {HW_FENCE_IPC_CLIENT_ID_IFE0_VID, HW_FENCE_IPC_CLIENT_ID_IFE0_PID_NIOBE, 0, false, false, true, false}, /* ife0 */ {HW_FENCE_IPC_CLIENT_ID_IFE1_VID, HW_FENCE_IPC_CLIENT_ID_IFE1_PID_NIOBE, 0, false, false, diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index b931ed6d53..b768c12cf7 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -872,6 +872,10 @@ int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - 1: /* nothing to initialize for VPU client */ break; + case HW_FENCE_CLIENT_ID_IPA ... HW_FENCE_CLIENT_ID_IPA + + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - 1: + /* nothing to initialize for IPA clients */ + break; case HW_FENCE_CLIENT_ID_IFE0 ... 
HW_FENCE_CLIENT_ID_IFE11 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - 1: /* nothing to initialize for IFE clients */ diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 22991ded51..cf58845a80 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -59,6 +59,7 @@ #define HW_FENCE_CLIENT_TYPE_MAX_IPE 32 #define HW_FENCE_CLIENT_TYPE_MAX_VPU 32 #define HW_FENCE_CLIENT_TYPE_MAX_IFE 32 +#define HW_FENCE_CLIENT_TYPE_MAX_IPA 32 /** * HW_FENCE_CLIENT_ID_CTRL_QUEUE: @@ -144,6 +145,8 @@ struct hw_fence_client_type_desc hw_fence_client_types[HW_FENCE_MAX_CLIENT_TYPE] 0, 0, 0, 0, 0, 0, false}, {"vpu", HW_FENCE_CLIENT_ID_VPU, HW_FENCE_CLIENT_TYPE_MAX_VPU, 0, HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false}, + {"ipa", HW_FENCE_CLIENT_ID_IPA, HW_FENCE_CLIENT_TYPE_MAX_IPA, 0, 1, 0, 0, 0, 0, 0, 0, + false}, {"ife0", HW_FENCE_CLIENT_ID_IFE0, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, true}, {"ife1", HW_FENCE_CLIENT_ID_IFE1, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c index 91ce2f5a0d..ab6cac2396 100644 --- a/hw_fence/src/msm_hw_fence_synx_translation.c +++ b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -46,6 +46,11 @@ static enum hw_fence_client_id _get_hw_fence_client_id(enum synx_client_id synx_ hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_DPU0_CTL0 + HW_FENCE_CLIENT_ID_CTL0; break; + case SYNX_CLIENT_HW_FENCE_IPA_CTX0 ... SYNX_CLIENT_HW_FENCE_IPA_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT - 1: + hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_IPA_CTX0 + + HW_FENCE_CLIENT_ID_IPA; + break; case SYNX_CLIENT_HW_FENCE_IFE0_CTX0 ... 
SYNX_CLIENT_HW_FENCE_IFE7_CTX0 + SYNX_MAX_SIGNAL_PER_CLIENT - 1: hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_IFE0_CTX0 + From 93c8d70a62a428bc99632cd899a357445058791e Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 19 Mar 2024 11:49:49 -0700 Subject: [PATCH 122/166] mm-drivers: hw_fence: update version checks for soccp driver SoCCP Driver is available on earlier kernel versions. Update kernel version check for rproc_set_state api accordingly. Change-Id: Id7b318a0e6022049f54478bcf123810f560081ff Signed-off-by: Grace An --- hw_fence/src/msm_hw_fence.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 1046c6449e..174e1a5d24 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -9,7 +9,7 @@ #include #include #include -#if (KERNEL_VERSION(6, 5, 0) <= LINUX_VERSION_CODE) +#if (KERNEL_VERSION(6, 1, 25) <= LINUX_VERSION_CODE) #include #endif #include @@ -32,7 +32,7 @@ static int _set_power_vote_if_needed(struct hw_fence_driver_data *drv_data, if (drv_data->has_soccp && hw_fence_client->client_id_ext >= HW_FENCE_CLIENT_ID_VAL0 && hw_fence_client->client_id_ext <= HW_FENCE_CLIENT_ID_VAL6) { -#if (KERNEL_VERSION(6, 5, 0) <= LINUX_VERSION_CODE) +#if (KERNEL_VERSION(6, 1, 25) <= LINUX_VERSION_CODE) ret = rproc_set_state(drv_data->soccp_rproc, state); #else ret = -EINVAL; From 18c42da4040b17c3df6c154ee831cfc7f569ec86 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 19 Mar 2024 11:10:19 -0700 Subject: [PATCH 123/166] mm-drivers: hw_fence: support clients that skip fctl ref Add support for clients that create hw-fences without fctl refcount, or the refcount held until fence controller processes the fence. Instead, this refcount is set when the producer client calls synx_import. 
Change-Id: I728b390ab2bc30cdb9060cc0d014e699918344ff Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 6 +++ hw_fence/include/hw_fence_drv_utils.h | 11 +++++ hw_fence/src/hw_fence_drv_priv.c | 8 +++- hw_fence/src/hw_fence_drv_utils.c | 63 +++++++++++++++++---------- hw_fence/src/msm_hw_fence.c | 2 + 5 files changed, 67 insertions(+), 23 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 01e48a3626..8c5b2f7535 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -204,6 +204,8 @@ enum payload_type { * wait on an already signaled fence * @signaled_send_ipc: bool to indicate if client requires ipc interrupt for already signaled fences * @txq_update_send_ipc: bool to indicate if client requires ipc interrupt for txq updates + * @skip_fctl_ref: bool to indicate if client-created fences should not have fctl refcount during + * initial creation; this refcount is instead set during synx_import call * @context_id: context id for fences created internally * @seqno: sequence no for fences created internally * @wait_queue: wait queue for the validation clients @@ -225,6 +227,7 @@ struct msm_hw_fence_client { bool signaled_update_rxq; bool signaled_send_ipc; bool txq_update_send_ipc; + bool skip_fctl_ref; u64 context_id; atomic_t seqno; #if IS_ENABLED(CONFIG_DEBUG_FS) @@ -291,6 +294,8 @@ struct msm_hw_fence_dbg_data { * @txq_idx_factor: factor to multiply custom TxQ idx to get index in dwords (one by default) * @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence * driver and hfi_header->tx_wm is updated instead + * @skip_fctl_ref: bool to indicate if client-created fences should not have fctl refcount during + * initial creation; this refcount is instead set during synx_import call */ struct hw_fence_client_type_desc { char *name; @@ -305,6 +310,7 @@ struct hw_fence_client_type_desc { u32 txq_idx_start; u32 
txq_idx_factor; bool skip_txq_wr_idx; + bool skip_fctl_ref; }; /** diff --git a/hw_fence/include/hw_fence_drv_utils.h b/hw_fence/include/hw_fence_drv_utils.h index d63bfefbe2..fb13db57dd 100644 --- a/hw_fence/include/hw_fence_drv_utils.h +++ b/hw_fence/include/hw_fence_drv_utils.h @@ -174,4 +174,15 @@ enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver */ int hw_fence_utils_get_queues_num(struct hw_fence_driver_data *drv_data, int client_id); +/** + * hw_fence_utils_get_skip_fctl_ref() - Returns if client avoids creating fences with fctl + * refcount initialized. + * + * @drv_data: driver data + * @client_id: hw fence driver client id + * + * Returns: number of client queues + */ +int hw_fence_utils_get_skip_fctl_ref(struct hw_fence_driver_data *drv_data, int client_id); + #endif /* __HW_FENCE_DRV_UTILS_H */ diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index b768c12cf7..895cdc6c78 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1536,7 +1536,6 @@ int hw_fence_create(struct hw_fence_driver_data *drv_data, { u32 client_id = hw_fence_client->client_id; struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl; - int ret = 0; /* allocate hw fence in table */ @@ -1547,6 +1546,13 @@ int hw_fence_create(struct hw_fence_driver_data *drv_data, ret = -EINVAL; } + if (hw_fence_client->skip_fctl_ref) { + ret = hw_fence_destroy_refcount(drv_data, *hash, HW_FENCE_FCTL_REFCOUNT); + if (ret) + HWFNC_ERR("Can't remove fctl ref client:%u ctx:%llu seqno:%llu hash:%llu\n", + client_id, context, seqno, *hash); + } + return ret; } diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index cf58845a80..d26f80d30d 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -136,41 +136,41 @@ */ struct hw_fence_client_type_desc hw_fence_client_types[HW_FENCE_MAX_CLIENT_TYPE] = { {"gpu", HW_FENCE_CLIENT_ID_CTX0, 
HW_FENCE_CLIENT_TYPE_MAX_GPU, HW_FENCE_CLIENT_TYPE_MAX_GPU, - HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false}, + HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false, false}, {"dpu", HW_FENCE_CLIENT_ID_CTL0, HW_FENCE_CLIENT_TYPE_MAX_DPU, HW_FENCE_CLIENT_TYPE_MAX_DPU, - HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false}, + HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false, false}, {"val", HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_TYPE_MAX_VAL, HW_FENCE_CLIENT_TYPE_MAX_VAL, - HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false}, + HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false, false}, {"ipe", HW_FENCE_CLIENT_ID_IPE, HW_FENCE_CLIENT_TYPE_MAX_IPE, 0, HW_FENCE_CLIENT_QUEUES, - 0, 0, 0, 0, 0, 0, false}, + 0, 0, 0, 0, 0, 0, false, false}, {"vpu", HW_FENCE_CLIENT_ID_VPU, HW_FENCE_CLIENT_TYPE_MAX_VPU, 0, HW_FENCE_CLIENT_QUEUES, - 0, 0, 0, 0, 0, 0, false}, + 0, 0, 0, 0, 0, 0, false, false}, {"ipa", HW_FENCE_CLIENT_ID_IPA, HW_FENCE_CLIENT_TYPE_MAX_IPA, 0, 1, 0, 0, 0, 0, 0, 0, - false}, + false, false}, {"ife0", HW_FENCE_CLIENT_ID_IFE0, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, - true}, + true, false}, {"ife1", HW_FENCE_CLIENT_ID_IFE1, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, - true}, + true, false}, {"ife2", HW_FENCE_CLIENT_ID_IFE2, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, - true}, + true, false}, {"ife3", HW_FENCE_CLIENT_ID_IFE3, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, - true}, + true, false}, {"ife4", HW_FENCE_CLIENT_ID_IFE4, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, - true}, + true, false}, {"ife5", HW_FENCE_CLIENT_ID_IFE5, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, - true}, + true, false}, {"ife6", HW_FENCE_CLIENT_ID_IFE6, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, - true}, + true, false}, {"ife7", HW_FENCE_CLIENT_ID_IFE7, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, - true}, + true, false}, {"ife8", HW_FENCE_CLIENT_ID_IFE8, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, - true}, + 
true, false}, {"ife9", HW_FENCE_CLIENT_ID_IFE9, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, - true}, + true, false}, {"ife10", HW_FENCE_CLIENT_ID_IFE10, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, - true}, + true, false}, {"ife11", HW_FENCE_CLIENT_ID_IFE11, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, - true}, + true, false}, }; static void _lock_vm(uint64_t *wait) @@ -934,7 +934,7 @@ static int _parse_client_queue_dt_props_extra(struct hw_fence_driver_data *drv_d { u32 max_idx_from_zero, payload_size_u32 = HW_FENCE_CLIENT_QUEUE_PAYLOAD / sizeof(u32); char name[DT_PROPS_CLIENT_EXTRA_PROPS_SIZE]; - u32 tmp[4]; + u32 tmp[5]; bool idx_by_payload = false; int count, ret; @@ -946,7 +946,7 @@ static int _parse_client_queue_dt_props_extra(struct hw_fence_driver_data *drv_d return 0; count = of_property_count_u32_elems(drv_data->dev->of_node, name); - if (count <= 0 || count > 4) { + if (count <= 0 || count > 5) { HWFNC_ERR("invalid %s extra dt props count:%d\n", desc->name, count); return -EINVAL; } @@ -973,6 +973,14 @@ static int _parse_client_queue_dt_props_extra(struct hw_fence_driver_data *drv_d idx_by_payload = tmp[3]; desc->txq_idx_factor = idx_by_payload ? payload_size_u32 : 1; } + if (count >= 5) { + if (tmp[4] > 1) { + HWFNC_ERR("%s invalid skip_fctl_ref prop:%u\n", desc->name, tmp[4]); + ret = -EINVAL; + goto exit; + } + desc->skip_fctl_ref = 1; + } if (desc->start_padding % sizeof(u32) || desc->end_padding % sizeof(u32) || (desc->start_padding + desc->end_padding) % sizeof(u64)) { @@ -1008,9 +1016,9 @@ static int _parse_client_queue_dt_props_extra(struct hw_fence_driver_data *drv_d goto exit; } - HWFNC_DBG_INIT("%s: start_p=%u end_p=%u txq_idx_start:%u txq_idx_by_payload:%s\n", + HWFNC_DBG_INIT("%s: start_p=%u end_p=%u txq_idx_start:%u idx_by_payload:%s skip_ref:%s\n", desc->name, desc->start_padding, desc->end_padding, desc->txq_idx_start, - idx_by_payload ? "true" : "false"); + idx_by_payload ? 
"true" : "false", desc->skip_fctl_ref ? "true" : "false"); exit: return ret; @@ -1337,3 +1345,14 @@ int hw_fence_utils_get_queues_num(struct hw_fence_driver_data *drv_data, int cli return drv_data->hw_fence_client_queue_size[client_id].type->queues_num; } + +int hw_fence_utils_get_skip_fctl_ref(struct hw_fence_driver_data *drv_data, int client_id) +{ + if (!drv_data || client_id >= drv_data->clients_num || + !drv_data->hw_fence_client_queue_size[client_id].type) { + HWFNC_ERR("invalid access to client:%d skip_fctl_ref\n", client_id); + return 0; + } + + return drv_data->hw_fence_client_queue_size[client_id].type->skip_fctl_ref; +} diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 7444b9d7d7..66961a5f06 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -130,6 +130,8 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, ret = -EINVAL; goto error; } + hw_fence_client->skip_fctl_ref = hw_fence_utils_get_skip_fctl_ref(hw_fence_drv_data, + client_id); /* Alloc Client HFI Headers and Queues */ ret = hw_fence_alloc_client_resources(hw_fence_drv_data, From 46f905eb2010c43d40808d06ac9ba5525fddc607 Mon Sep 17 00:00:00 2001 From: Grace An Date: Thu, 21 Mar 2024 10:08:00 -0700 Subject: [PATCH 124/166] mm-drivers: hw_fence: clean up h_synx for hw-fences Set hw_fence->h_synx to zero for hw-fences during cleanup. 
Change-Id: I6d78f5230ab4b41a0f10d73e965690f7ff4b4ca1 Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_priv.c | 1 + 1 file changed, 1 insertion(+) diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 9d0a3feb5d..048e6a1dae 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1014,6 +1014,7 @@ static void _cleanup_hw_fence(struct msm_hw_fence *hw_fence) hw_fence->refcount = 0; hw_fence->parents_cnt = 0; hw_fence->pending_child_cnt = 0; + hw_fence->h_synx = 0; for (i = 0; i < MSM_HW_FENCE_MAX_JOIN_PARENTS; i++) hw_fence->parent_list[i] = HW_FENCE_INVALID_PARENT_FENCE; From 681d4770fe31325e23baa37fef7597de17fa2575 Mon Sep 17 00:00:00 2001 From: Grace An Date: Thu, 29 Feb 2024 16:00:52 -0800 Subject: [PATCH 125/166] mm-drivers: hw_fence: support import by creating client Currently, if a client that created the fence calls synx_import, the client registers to wait on the fence. This change ensures that synx_import by the fence allocator does not result in this client getting notified of fence signal. Instead this function increments hlos refcount and sets the fctl refcount if not already set for the hw-fence. Change-Id: I55772a46cf7f428d43be02b9f3e87102e9f2d54a Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_priv.c | 23 ++++++++++++++++------- hw_fence/src/msm_hw_fence.c | 6 ------ 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 9d0a3feb5d..1a1bef9a3d 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1971,6 +1971,7 @@ error_array: * Registers the hw-fence client for wait on a hw-fence and keeps a reference on that hw-fence. * The hw-fence must be explicitly dereferenced following this function, e.g. by client * synx_release call. + * This function does not register the fence_allocator as a waiting client. 
* * Note: This is the only place where the hw-fence refcount is retained for the client to release. * In all other places, the HW Fence Driver releases the refcount held for processing. @@ -1981,7 +1982,7 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, { struct msm_hw_fence *hw_fence; enum hw_fence_client_data_id data_id; - bool is_signaled; + bool is_signaled = false; if (client_data) { data_id = hw_fence_get_client_data_id(hw_fence_client->client_id_ext); @@ -2005,12 +2006,20 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */ - /* register client in the hw fence */ - is_signaled = hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL; - hw_fence->wait_client_mask |= BIT(hw_fence_client->client_id); - hw_fence->fence_wait_time = hw_fence_get_qtime(drv_data); - if (client_data) - hw_fence->client_data[data_id] = client_data; + /* + * If a creating client calls synx_import, then an additional hlos refcount is taken and a + * refcount is set for processing this fence in FenceCTL + */ + if (hw_fence->fence_allocator == hw_fence_client->client_id) { + hw_fence->refcount |= HW_FENCE_FCTL_REFCOUNT; + } else { + /* register client in the hw fence */ + is_signaled = hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL; + hw_fence->wait_client_mask |= BIT(hw_fence_client->client_id); + hw_fence->fence_wait_time = hw_fence_get_qtime(drv_data); + if (client_data) + hw_fence->client_data[data_id] = client_data; + } /* update memory for the table update */ wmb(); diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 1046c6449e..42c25cca2a 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -400,12 +400,6 @@ int msm_hw_fence_wait_update_v2(void *client_handle, return -EINVAL; } - if (hw_fence_client->client_id > hw_fence_drv_data->rxq_clients_num) { - HWFNC_ERR("Transmit-only client client_id:%d client_id_ext:%d register for wait\n", - 
hw_fence_client->client_id, hw_fence_client->client_id_ext); - return -EINVAL; - } - HWFNC_DBG_H("+\n"); /* Process all the list of fences */ From fc137eade861283a701aef2a60d867e96d876477 Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 13 Sep 2023 20:51:56 -0700 Subject: [PATCH 126/166] mm-drivers: hw_fence: support multiple registrations for a client Allow hw-fence clients to call client registration more than one time. In subsequent registration calls, client is not registered again, but instead the internal previously allocated info is returned. Change-Id: If07d0d46dc742849891c3b3c29ab86a96c36f8a6 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 2 ++ hw_fence/src/msm_hw_fence.c | 49 ++++++++++++++++++++-------- 2 files changed, 38 insertions(+), 13 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 01e48a3626..b8ed26b1ac 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -208,6 +208,7 @@ enum payload_type { * @seqno: sequence no for fences created internally * @wait_queue: wait queue for the validation clients * @val_signal: doorbell flag to signal the validation clients in the wait queue + * @kref: number of active references to this client */ struct msm_hw_fence_client { enum hw_fence_client_id client_id; @@ -227,6 +228,7 @@ struct msm_hw_fence_client { bool txq_update_send_ipc; u64 context_id; atomic_t seqno; + struct kref kref; #if IS_ENABLED(CONFIG_DEBUG_FS) wait_queue_head_t wait_queue; atomic_t val_signal; diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 174e1a5d24..249b4b9ec2 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -24,14 +24,13 @@ struct hw_fence_driver_data *hw_fence_drv_data; bool hw_fence_driver_enable; static int _set_power_vote_if_needed(struct hw_fence_driver_data *drv_data, - struct msm_hw_fence_client *hw_fence_client, bool state) + u32 client_id, bool state) 
{ int ret = 0; #if IS_ENABLED(CONFIG_DEBUG_FS) - if (drv_data->has_soccp && - hw_fence_client->client_id_ext >= HW_FENCE_CLIENT_ID_VAL0 && - hw_fence_client->client_id_ext <= HW_FENCE_CLIENT_ID_VAL6) { + if (drv_data->has_soccp && client_id >= HW_FENCE_CLIENT_ID_VAL0 && + client_id <= HW_FENCE_CLIENT_ID_VAL6) { #if (KERNEL_VERSION(6, 1, 25) <= LINUX_VERSION_CODE) ret = rproc_set_state(drv_data->soccp_rproc, state); #else @@ -43,6 +42,13 @@ static int _set_power_vote_if_needed(struct hw_fence_driver_data *drv_data, return ret; } +static void msm_hw_fence_client_destroy(struct kref *kref) +{ + struct msm_hw_fence_client *hw_fence_client = container_of(kref, + struct msm_hw_fence_client, kref); + hw_fence_cleanup_client(hw_fence_drv_data, hw_fence_client); +} + void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, struct msm_hw_fence_mem_addr *mem_descriptor) { @@ -76,14 +82,27 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, hw_fence_client = kzalloc(sizeof(*hw_fence_client), GFP_KERNEL); if (!hw_fence_client) return ERR_PTR(-ENOMEM); + kref_init(&hw_fence_client->kref); /* Avoid race condition if multiple-threads request same client at same time */ mutex_lock(&hw_fence_drv_data->clients_register_lock); if (hw_fence_drv_data->clients[client_id]) { - HWFNC_ERR("client with id %d already registered\n", client_id); + kref_get(&hw_fence_drv_data->clients[client_id]->kref); mutex_unlock(&hw_fence_drv_data->clients_register_lock); + HWFNC_DBG_INIT("client with id %d already registered\n", client_id); kfree(hw_fence_client); - return ERR_PTR(-EINVAL); + + /* Client already exists, return the pointer to the client and populate mem desc */ + hw_fence_client = hw_fence_drv_data->clients[client_id]; + + /* Init client memory descriptor */ + if (!IS_ERR_OR_NULL(mem_descriptor)) + memcpy(mem_descriptor, &hw_fence_client->mem_descriptor, + sizeof(struct msm_hw_fence_mem_addr)); + else + HWFNC_DBG_L("null mem descriptor, skipping copy\n"); + + 
return hw_fence_client; } /* Mark client as registered */ @@ -162,7 +181,7 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, init_waitqueue_head(&hw_fence_client->wait_queue); #endif /* CONFIG_DEBUG_FS */ - ret = _set_power_vote_if_needed(hw_fence_drv_data, hw_fence_client, true); + ret = _set_power_vote_if_needed(hw_fence_drv_data, hw_fence_client->client_id_ext, true); if (ret) { HWFNC_ERR("set soccp power vote failed, fail client:%u registration ret:%d\n", hw_fence_client->client_id_ext, ret); @@ -173,7 +192,7 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, error: /* Free all the allocated resources */ - hw_fence_cleanup_client(hw_fence_drv_data, hw_fence_client); + kref_put(&hw_fence_client->kref, msm_hw_fence_client_destroy); HWFNC_ERR("failed with error:%d\n", ret); return ERR_PTR(ret); @@ -183,13 +202,16 @@ EXPORT_SYMBOL_GPL(msm_hw_fence_register); int msm_hw_fence_deregister(void *client_handle) { struct msm_hw_fence_client *hw_fence_client; - int ret; + bool destroyed_client; + u32 client_id; + int ret = 0; if (IS_ERR_OR_NULL(client_handle)) { HWFNC_ERR("Invalid client handle\n"); return -EINVAL; } hw_fence_client = (struct msm_hw_fence_client *)client_handle; + client_id = hw_fence_client->client_id_ext; if (hw_fence_client->client_id >= hw_fence_drv_data->clients_num) { HWFNC_ERR("Invalid client_id:%d\n", hw_fence_client->client_id); @@ -198,14 +220,15 @@ int msm_hw_fence_deregister(void *client_handle) HWFNC_DBG_H("+\n"); - ret = _set_power_vote_if_needed(hw_fence_drv_data, hw_fence_client, false); + /* Free all the allocated resources */ + destroyed_client = kref_put(&hw_fence_client->kref, msm_hw_fence_client_destroy); + + if (destroyed_client) + ret = _set_power_vote_if_needed(hw_fence_drv_data, client_id, false); if (ret) HWFNC_ERR("remove soccp power vote failed, fail client:%u deregistration ret:%d\n", hw_fence_client->client_id_ext, ret); - /* Free all the allocated resources */ - 
hw_fence_cleanup_client(hw_fence_drv_data, hw_fence_client); - HWFNC_DBG_H("-\n"); return 0; From f345a146d5bac4b81d1d9cdec194ce4ce9299922 Mon Sep 17 00:00:00 2001 From: Grace An Date: Mon, 11 Mar 2024 12:01:08 -0700 Subject: [PATCH 127/166] mm-drivers: hw_fence: add target-based defconfig for hw-fence Add target-based defconfig for hw-fence driver and define a new Kconfig for using synx implementation. Remove compilation support for synx when not using bazel. Change-Id: Iad71d6e60b1848e80d2bceb7a1c8881f2be27b8e Signed-off-by: Grace An --- hw_fence/Kbuild | 6 ------ hw_fence/Kconfig | 11 ++++++++++- hw_fence/defconfig | 1 - hw_fence/define_hw_fence.bzl | 10 +++++++--- hw_fence/pineapple_defconfig | 2 ++ hw_fence/sun_defconfig | 2 ++ 6 files changed, 21 insertions(+), 11 deletions(-) delete mode 100644 hw_fence/defconfig create mode 100644 hw_fence/pineapple_defconfig create mode 100644 hw_fence/sun_defconfig diff --git a/hw_fence/Kbuild b/hw_fence/Kbuild index 3bcd693da7..2cf74d291b 100644 --- a/hw_fence/Kbuild +++ b/hw_fence/Kbuild @@ -14,12 +14,6 @@ msm_hw_fence-y := src/msm_hw_fence.o \ src/hw_fence_drv_debug.o \ src/hw_fence_drv_ipc.o -ifneq ($(CONFIG_ARCH_KALAMA), y) -LINUXINCLUDE += -I$(MSM_HW_FENCE_ROOT)/../synx-kernel/msm/synx/ \ - -I$(MSM_HW_FENCE_ROOT)/../synx-kernel/include/uapi/synx/media/ -msm_hw_fence-y += src/msm_hw_fence_synx_translation.o -endif - msm_hw_fence-$(CONFIG_DEBUG_FS) += src/hw_fence_ioctl.o CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" diff --git a/hw_fence/Kconfig b/hw_fence/Kconfig index a50b02eefd..75205f2a59 100644 --- a/hw_fence/Kconfig +++ b/hw_fence/Kconfig @@ -1,4 +1,13 @@ config QTI_HW_FENCE bool "HW Fence" help - Enable the hw_fence module \ No newline at end of file + Enable the hw_fence module + +config QTI_HW_FENCE_USE_SYNX + bool "HW Fence uses synx" + help + Enable the hw_fence module through synx api. 
+ This will enable hw-fence module to register + hw-fence ops with synx module to support hw- + fencing through synx api and inter-op + functionality between synx and hw-fence. diff --git a/hw_fence/defconfig b/hw_fence/defconfig deleted file mode 100644 index f80d4f65f7..0000000000 --- a/hw_fence/defconfig +++ /dev/null @@ -1 +0,0 @@ -CONFIG_QTI_HW_FENCE=y diff --git a/hw_fence/define_hw_fence.bzl b/hw_fence/define_hw_fence.bzl index d6d674f570..e29944be1d 100644 --- a/hw_fence/define_hw_fence.bzl +++ b/hw_fence/define_hw_fence.bzl @@ -8,20 +8,24 @@ def _define_module(target, variant): name = "{}_msm_hw_fence".format(tv), srcs = [ "src/hw_fence_drv_debug.c", - "src/hw_fence_drv_interop.c", "src/hw_fence_drv_ipc.c", "src/hw_fence_drv_priv.c", "src/hw_fence_drv_utils.c", "src/msm_hw_fence.c", - "src/msm_hw_fence_synx_translation.c", ], out = "msm_hw_fence.ko", - defconfig = "defconfig", + defconfig = "{}_defconfig".format(target), kconfig = "Kconfig", conditional_srcs = { "CONFIG_DEBUG_FS": { True: ["src/hw_fence_ioctl.c"], }, + "CONFIG_QTI_HW_FENCE_USE_SYNX" : { + True: [ + "src/msm_hw_fence_synx_translation.c", + "src/hw_fence_drv_interop.c", + ] + }, }, deps = [ "//msm-kernel:all_headers", diff --git a/hw_fence/pineapple_defconfig b/hw_fence/pineapple_defconfig new file mode 100644 index 0000000000..b39eb5efbe --- /dev/null +++ b/hw_fence/pineapple_defconfig @@ -0,0 +1,2 @@ +CONFIG_QTI_HW_FENCE=y +CONFIG_QTI_HW_FENCE_USE_SYNX=y diff --git a/hw_fence/sun_defconfig b/hw_fence/sun_defconfig new file mode 100644 index 0000000000..b39eb5efbe --- /dev/null +++ b/hw_fence/sun_defconfig @@ -0,0 +1,2 @@ +CONFIG_QTI_HW_FENCE=y +CONFIG_QTI_HW_FENCE_USE_SYNX=y From afb4c098a66282432df1525714495aac301e3e4a Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 28 Feb 2024 17:34:53 -0800 Subject: [PATCH 128/166] mm-drivers: hw_fence: update client queue with synx hwfence handle flag This change ensures that client rx queues are updated with hw-fence handle flag that is used for 
synx api. Change-Id: I459feee252c9b55b122525cbd213660427493771 Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_priv.c | 7 ++++++- hw_fence/src/msm_hw_fence_synx_translation.c | 1 - 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 9d0a3feb5d..02700646e3 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -12,6 +12,11 @@ #include "hw_fence_drv_ipc.h" #include "hw_fence_drv_debug.h" #include "hw_fence_drv_fence.h" +#if IS_ENABLED(CONFIG_QTI_HW_FENCE_USE_SYNX) +#include +#else +#define SYNX_HW_FENCE_HANDLE_FLAG 0 +#endif /* CONFIG_QTI_HW_FENCE_USE_SYNX */ /* Global atomic lock */ #define GLOBAL_ATOMIC_STORE(drv_data, lock, val) global_atomic_store(drv_data, lock, val) @@ -471,7 +476,7 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, writew_relaxed(HW_FENCE_PAYLOAD_REV(1, 0), &write_ptr_payload->version); writeq_relaxed(ctxt_id, &write_ptr_payload->ctxt_id); writeq_relaxed(seqno, &write_ptr_payload->seqno); - writeq_relaxed(hash, &write_ptr_payload->hash); + writeq_relaxed(hash | SYNX_HW_FENCE_HANDLE_FLAG, &write_ptr_payload->hash); writeq_relaxed(flags, &write_ptr_payload->flags); writeq_relaxed(client_data, &write_ptr_payload->client_data); writel_relaxed(error, &write_ptr_payload->error); diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c index 91ce2f5a0d..432bdfdccb 100644 --- a/hw_fence/src/msm_hw_fence_synx_translation.c +++ b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -246,7 +246,6 @@ static int synx_hwfence_wait(struct synx_session *session, u32 h_synx, u64 timeo } #if IS_ENABLED(CONFIG_DEBUG_FS) - h_synx &= HW_FENCE_HANDLE_INDEX_MASK; if (session->type >= SYNX_CLIENT_HW_FENCE_TEST_CTX0 && session->type <= SYNX_CLIENT_HW_FENCE_TEST_CTX0 + MAX_SUPPORTED_TEST) ret = hw_fence_debug_wait_val(hw_fence_drv_data, session->client, NULL, h_synx, From 
549a2cce75ed96df19bc985c403842f7ea996f5e Mon Sep 17 00:00:00 2001 From: Grace An Date: Thu, 7 Mar 2024 17:32:52 -0800 Subject: [PATCH 129/166] mm-drivers: hw_fence: add mask to handle comparison for synx_wait The handle received through client rx queue may or may not have handle flag bit set. Thus, add mask to handle comparison when an apps loopback client reads the entry in client rx queue. Change-Id: Ibce45d3d1f2de73fe91b6ce2c2ce8c88ab37570c Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_debug.h | 2 +- hw_fence/src/hw_fence_drv_debug.c | 8 ++++---- hw_fence/src/hw_fence_ioctl.c | 2 +- hw_fence/src/msm_hw_fence_synx_translation.c | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_debug.h b/hw_fence/include/hw_fence_drv_debug.h index 3b7e06bcf8..dd3856d3e8 100644 --- a/hw_fence/include/hw_fence_drv_debug.h +++ b/hw_fence/include/hw_fence_drv_debug.h @@ -75,7 +75,7 @@ int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data); int process_validation_client_loopback(struct hw_fence_driver_data *drv_data, int client_id); int hw_fence_debug_wait_val(struct hw_fence_driver_data *drv_data, - struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, u64 hash, + struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, u64 hash, u64 mask, u64 timeout_ms, u32 *error); void hw_fence_debug_dump_queues(struct hw_fence_driver_data *drv_data, enum hw_fence_drv_prio prio, diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index 8d8c137c9a..7ea1ad4d94 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -1176,7 +1176,7 @@ int process_validation_client_loopback(struct hw_fence_driver_data *drv_data, static long _process_val_signal(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, - struct dma_fence *fence, u64 hash, u32 *error) + struct dma_fence *fence, u64 hash, u64 mask, u32 
*error) { struct msm_hw_fence_queue_payload payload; int read = 1, queue_type = HW_FENCE_RX_QUEUE - 1; /* rx queue index */ @@ -1198,7 +1198,7 @@ static long _process_val_signal(struct hw_fence_driver_data *drv_data, HWFNC_DBG_L("rxq read: hash:%llu, flags:%llu, error:%u\n", payload.hash, payload.flags, payload.error); if ((fence && payload.ctxt_id == context && payload.seqno == seqno) || - hash == payload.hash) { + (mask && ((mask & hash) == (mask & payload.hash)))) { *error = payload.error; return 0; } @@ -1212,7 +1212,7 @@ static long _process_val_signal(struct hw_fence_driver_data *drv_data, } int hw_fence_debug_wait_val(struct hw_fence_driver_data *drv_data, - struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, u64 hash, + struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, u64 hash, u64 mask, u64 timeout_ms, u32 *error) { ktime_t cur_ktime, exp_ktime; @@ -1239,7 +1239,7 @@ int hw_fence_debug_wait_val(struct hw_fence_driver_data *drv_data, dma_fence_put(fence); return -ETIMEDOUT; } - ret = _process_val_signal(drv_data, hw_fence_client, fence, hash, error); + ret = _process_val_signal(drv_data, hw_fence_client, fence, hash, mask, error); /* if val client fails to find expected fence, keep waiting until timeout */ } diff --git a/hw_fence/src/hw_fence_ioctl.c b/hw_fence/src/hw_fence_ioctl.c index 507d4a3761..f75fcdd874 100644 --- a/hw_fence/src/hw_fence_ioctl.c +++ b/hw_fence/src/hw_fence_ioctl.c @@ -506,7 +506,7 @@ static long hw_sync_ioctl_fence_wait(struct hw_sync_obj *obj, unsigned long arg) return -EINVAL; } - ret = hw_fence_debug_wait_val(hw_fence_drv_data, hw_fence_client, fence, 0, + ret = hw_fence_debug_wait_val(hw_fence_drv_data, hw_fence_client, fence, 0, 0, data.timeout_ms, &error); if (ret) HWFNC_ERR("failed to wait for hw-fence client:%d ctx:%llu seq:%llu\n", diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c index 432bdfdccb..2a8587950f 100644 --- 
a/hw_fence/src/msm_hw_fence_synx_translation.c +++ b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -249,7 +249,7 @@ static int synx_hwfence_wait(struct synx_session *session, u32 h_synx, u64 timeo if (session->type >= SYNX_CLIENT_HW_FENCE_TEST_CTX0 && session->type <= SYNX_CLIENT_HW_FENCE_TEST_CTX0 + MAX_SUPPORTED_TEST) ret = hw_fence_debug_wait_val(hw_fence_drv_data, session->client, NULL, h_synx, - timeout_ms, &error); + HW_FENCE_HANDLE_INDEX_MASK, timeout_ms, &error); #endif /* CONFIG_DEBUG_FS */ if (ret) { From 3476b9743ca68db6df294a659cd1a333eba4065c Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 2 Apr 2024 09:31:21 -0700 Subject: [PATCH 130/166] mm-drivers: hw_fence: add defconfig for niobe target Add defconfig file to define kconfig settings for niobe target. Change-Id: Ib1bcfb1525c99fa19f5b3f12014208e41beab55b Signed-off-by: Grace An --- hw_fence/niobe_defconfig | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 hw_fence/niobe_defconfig diff --git a/hw_fence/niobe_defconfig b/hw_fence/niobe_defconfig new file mode 100644 index 0000000000..5b903c60cd --- /dev/null +++ b/hw_fence/niobe_defconfig @@ -0,0 +1,2 @@ +CONFIG_QTI_HW_FENCE=y +CONFIG_QTI_HW_FENCE_USE_SYNX=y \ No newline at end of file From f97a9682ddb41c75a5c49150aca09ac5a737551e Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 12 Mar 2024 17:26:46 -0700 Subject: [PATCH 131/166] mm-drivers: hw_fence: signal internally owned dma-fence Add implementation to signal internally owned dma-fences from last synx_release called from hlos. This is expected to be called after synx_signal by client that created the dma-fence; if release happens before signal, an error message is printed. 
Change-Id: I0ff043772a7607badab1599a21b7a43fa3017615 Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_priv.c | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index c88d532b94..4b2bd4b413 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -16,6 +16,7 @@ #include #else #define SYNX_HW_FENCE_HANDLE_FLAG 0 +#define SYNX_STATE_SIGNALED_CANCEL 4 #endif /* CONFIG_QTI_HW_FENCE_USE_SYNX */ /* Global atomic lock */ @@ -1500,11 +1501,12 @@ struct dma_fence *hw_fence_dma_fence_find(struct hw_fence_driver_data *drv_data, return fence; } -static int hw_fence_dma_fence_table_del(struct hw_fence_driver_data *drv_data, u64 hash) +static int hw_fence_dma_fence_table_del(struct hw_fence_driver_data *drv_data, u64 hash, + u64 flags, u32 error) { struct hw_dma_fence *hw_dma_fence; struct dma_fence *fence; - unsigned long flags; + unsigned long lock_flags; int ret = 0; fence = hw_fence_dma_fence_find(drv_data, hash, false); @@ -1517,19 +1519,30 @@ static int hw_fence_dma_fence_table_del(struct hw_fence_driver_data *drv_data, u fence->context, fence->seqno, hw_dma_fence->dma_fence_key, kref_read(&fence->refcount)); - spin_lock_irqsave(&drv_data->dma_fence_table_lock, flags); + spin_lock_irqsave(&drv_data->dma_fence_table_lock, lock_flags); /* remove dma-fence from the internal hash table */ if (hash_hashed(&hw_dma_fence->node)) hash_del(&hw_dma_fence->node); else ret = -EINVAL; - spin_unlock_irqrestore(&drv_data->dma_fence_table_lock, flags); + spin_unlock_irqrestore(&drv_data->dma_fence_table_lock, lock_flags); if (ret) HWFNC_ERR("internally owned dma-fence is not in table ctx:%llu seqno:%llu key:%u\n", fence->context, fence->seqno, hw_dma_fence->dma_fence_key); + /* avoid signaling hw-fence when releasing hlos ref */ dma_fence_remove_callback(fence, &hw_dma_fence->signal_cb.fence_cb); + + spin_lock_irqsave(fence->lock, lock_flags); 
+ if (!dma_fence_is_signaled(fence)) { + if (!(flags & MSM_HW_FENCE_FLAG_SIGNAL)) + error = SYNX_STATE_SIGNALED_CANCEL; + if (error) + dma_fence_set_error(fence, -error); + dma_fence_signal_locked(fence); + } + spin_unlock_irqrestore(fence->lock, lock_flags); dma_fence_put(fence); return ret; @@ -1590,6 +1603,8 @@ static int hw_fence_put_and_unlock(struct hw_fence_driver_data *drv_data, u32 cl { bool release_dma = false; int ret = 0; + u64 flags; + u32 error; if (hw_fence->refcount & HW_FENCE_HLOS_REFCOUNT_MASK) { hw_fence->refcount--; @@ -1602,6 +1617,8 @@ static int hw_fence_put_and_unlock(struct hw_fence_driver_data *drv_data, u32 cl !(hw_fence->refcount & HW_FENCE_HLOS_REFCOUNT_MASK)) { hw_fence->flags &= ~MSM_HW_FENCE_FLAG_INTERNAL_OWNED; release_dma = true; + flags = hw_fence->flags; + error = hw_fence->error; } if (!hw_fence->refcount) { @@ -1621,7 +1638,7 @@ end: } if (release_dma) { - ret = hw_fence_dma_fence_table_del(drv_data, hash); + ret = hw_fence_dma_fence_table_del(drv_data, hash, flags, error); if (ret) HWFNC_ERR("Failed to delete internal dma-fence for hw-fence hash:%llu\n", hash); From f53124d43fde04f14e15782424cdd06856107b62 Mon Sep 17 00:00:00 2001 From: Grace An Date: Fri, 5 Apr 2024 15:57:50 -0700 Subject: [PATCH 132/166] mm-drivers: hw_fence: avoid error message when soccp is not ready HW Fence driver probe is deferred when SOCCP has not finished booting up. Avoid error message in this scenario. 
Change-Id: If7bdf4c2406ba95f9203a0717244f5c7821ffc24 Signed-off-by: Grace An --- hw_fence/src/msm_hw_fence.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index c3dec57724..21d7369b09 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -866,7 +866,8 @@ static int msm_hw_fence_probe(struct platform_device *pdev) return 0; err_exit: - HWFNC_ERR_ONCE("error %d\n", rc); + if (rc != -EPROBE_DEFER) + HWFNC_ERR_ONCE("error %d\n", rc); return rc; } From 51550cb6cdc930cb626ef6d62694c48d3b1e4989 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 9 Apr 2024 11:46:18 -0700 Subject: [PATCH 133/166] mm-drivers: hw_fence: use defconfig for pineapple target Retain old defconfig name for pineapple target to adhere to graphics dependency. Change-Id: I84459c158fd8b0b2ff0ee3e49c67ec3d4cdaa163 Signed-off-by: Grace An --- hw_fence/{pineapple_defconfig => defconfig} | 0 hw_fence/define_hw_fence.bzl | 7 ++++++- 2 files changed, 6 insertions(+), 1 deletion(-) rename hw_fence/{pineapple_defconfig => defconfig} (100%) diff --git a/hw_fence/pineapple_defconfig b/hw_fence/defconfig similarity index 100% rename from hw_fence/pineapple_defconfig rename to hw_fence/defconfig diff --git a/hw_fence/define_hw_fence.bzl b/hw_fence/define_hw_fence.bzl index e29944be1d..849bc62d13 100644 --- a/hw_fence/define_hw_fence.bzl +++ b/hw_fence/define_hw_fence.bzl @@ -4,6 +4,11 @@ load("//msm-kernel:target_variants.bzl", "get_all_variants") def _define_module(target, variant): tv = "{}_{}".format(target, variant) + if target in [ "pineapple" ]: + target_config = "defconfig" + else: + target_config = "{}_defconfig".format(target) + ddk_module( name = "{}_msm_hw_fence".format(tv), srcs = [ @@ -14,7 +19,7 @@ def _define_module(target, variant): "src/msm_hw_fence.c", ], out = "msm_hw_fence.ko", - defconfig = "{}_defconfig".format(target), + defconfig = target_config, kconfig = "Kconfig", conditional_srcs = { 
"CONFIG_DEBUG_FS": { From c4ed87592e962b57aabeaa47fd78ca5cfd5eefff Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 9 Apr 2024 11:46:18 -0700 Subject: [PATCH 134/166] mm-drivers: hw_fence: use defconfig for pineapple target Retain old defconfig name for pineapple target to adhere to graphics dependency. Change-Id: I84459c158fd8b0b2ff0ee3e49c67ec3d4cdaa163 Signed-off-by: Grace An Signed-off-by: Manoj Amara --- hw_fence/{pineapple_defconfig => defconfig} | 0 hw_fence/define_hw_fence.bzl | 7 ++++++- 2 files changed, 6 insertions(+), 1 deletion(-) rename hw_fence/{pineapple_defconfig => defconfig} (100%) diff --git a/hw_fence/pineapple_defconfig b/hw_fence/defconfig similarity index 100% rename from hw_fence/pineapple_defconfig rename to hw_fence/defconfig diff --git a/hw_fence/define_hw_fence.bzl b/hw_fence/define_hw_fence.bzl index e29944be1d..849bc62d13 100644 --- a/hw_fence/define_hw_fence.bzl +++ b/hw_fence/define_hw_fence.bzl @@ -4,6 +4,11 @@ load("//msm-kernel:target_variants.bzl", "get_all_variants") def _define_module(target, variant): tv = "{}_{}".format(target, variant) + if target in [ "pineapple" ]: + target_config = "defconfig" + else: + target_config = "{}_defconfig".format(target) + ddk_module( name = "{}_msm_hw_fence".format(tv), srcs = [ @@ -14,7 +19,7 @@ def _define_module(target, variant): "src/msm_hw_fence.c", ], out = "msm_hw_fence.ko", - defconfig = "{}_defconfig".format(target), + defconfig = target_config, kconfig = "Kconfig", conditional_srcs = { "CONFIG_DEBUG_FS": { From 359191b132e64eed806383f4c0cee26f2a0287d0 Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 10 Apr 2024 16:44:05 -0700 Subject: [PATCH 135/166] mm-drivers: hw_fence: fix lock implementation for soccp Fix lock implementation in hlos so that this always sets BIT-0 in the hw-fence lock. 
Change-Id: I0ca1fc11d9eaf8c2fc566d8b8fe8903471591e1b Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_utils.c | 26 ++------------------------ 1 file changed, 2 insertions(+), 24 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 55d6e7fde0..c5b871a22f 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -173,7 +173,7 @@ struct hw_fence_client_type_desc hw_fence_client_types[HW_FENCE_MAX_CLIENT_TYPE] true, false}, }; -static void _lock_vm(uint64_t *wait) +static void _lock(uint64_t *wait) { #if defined(__aarch64__) __asm__( @@ -232,25 +232,6 @@ static void _unlock_vm(struct hw_fence_driver_data *drv_data, uint64_t *lock) } } -static void _lock_soccp(uint64_t *wait) -{ - /* Wait (without WFE) */ -#if defined(__aarch64__) - __asm__("SEVL\n\t" - "PRFM PSTL1KEEP, [%x[i_lock]]\n\t" - "1:\n\t" - "LDAXR W5, [%x[i_lock]]\n\t" - "CBNZ W5, 1b\n\t" - "STXR W5, W0, [%x[i_lock]]\n\t" - "CBNZ W5, 1b\n" - : - : [i_lock] "r" (wait) - : "memory"); -#elif - HWFNC_ERR("cannot lock\n"); -#endif -} - static void _unlock_soccp(uint64_t *lock) { /* Signal Client */ @@ -269,10 +250,7 @@ void global_atomic_store(struct hw_fence_driver_data *drv_data, uint64_t *lock, { if (val) { preempt_disable(); - if (drv_data->has_soccp) - _lock_soccp(lock); - else - _lock_vm(lock); + _lock(lock); } else { if (drv_data->has_soccp) _unlock_soccp(lock); From a29ec766f8df67f6d026e804d21446007fe5c68d Mon Sep 17 00:00:00 2001 From: Grace An Date: Fri, 12 Apr 2024 10:32:52 -0700 Subject: [PATCH 136/166] mm-drivers: hw_fence: allow use of full number of test clients Currently, initialization of SYNX_HW_FENCE_CLIENT_TEST_CTX6 is not allowed. Allow this initialization by updating the max supported test clients. 
Change-Id: I34a7dc2d6fcfb4ad3229750ba093131f5d8072ca Signed-off-by: Grace An --- hw_fence/src/msm_hw_fence_synx_translation.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c index 28e6a8a823..37707f45a9 100644 --- a/hw_fence/src/msm_hw_fence_synx_translation.c +++ b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -18,7 +18,7 @@ * MAX_SUPPORTED_TEST: Maximum number of validation clients supported */ #define MAX_SUPPORTED_DPU0 (HW_FENCE_CLIENT_ID_CTL5 - HW_FENCE_CLIENT_ID_CTL0) -#define MAX_SUPPORTED_TEST (HW_FENCE_CLIENT_ID_VAL6 - HW_FENCE_CLIENT_ID_VAL1) +#define MAX_SUPPORTED_TEST (HW_FENCE_CLIENT_ID_VAL6 - HW_FENCE_CLIENT_ID_VAL0) static enum hw_fence_client_id _get_hw_fence_client_id(enum synx_client_id synx_client_id) { From 445d737a7692ad434f9e05effe3ffe8cdaa0a4f1 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 23 Apr 2024 09:14:24 -0700 Subject: [PATCH 137/166] mm-drivers: hw_fence: add support for additional ife clients Add support to translate the synx client ids of ife8 through ife11 clients. Change-Id: Ibf79d3d324270936c7251cb16d5312b7ed80e8dd Signed-off-by: Grace An --- hw_fence/src/msm_hw_fence_synx_translation.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c index 28e6a8a823..37f8438a55 100644 --- a/hw_fence/src/msm_hw_fence_synx_translation.c +++ b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -47,7 +47,7 @@ static enum hw_fence_client_id _get_hw_fence_client_id(enum synx_client_id synx_ hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_IPA_CTX0 + HW_FENCE_CLIENT_ID_IPA; break; - case SYNX_CLIENT_HW_FENCE_IFE0_CTX0 ... SYNX_CLIENT_HW_FENCE_IFE7_CTX0 + + case SYNX_CLIENT_HW_FENCE_IFE0_CTX0 ... 
SYNX_CLIENT_HW_FENCE_IFE11_CTX0 + SYNX_MAX_SIGNAL_PER_CLIENT - 1: hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_IFE0_CTX0 + HW_FENCE_CLIENT_ID_IFE0; From fc5883f597ef7dd63b4815476e99b822e5d2f2ef Mon Sep 17 00:00:00 2001 From: Grace An Date: Fri, 22 Mar 2024 16:26:38 -0700 Subject: [PATCH 138/166] mm-drivers: hw_fence: allocate hwfence region at runtime When device-tree is not present for static allocation of carved-out hw-fence memory region, allocate this memory at runtime. Change-Id: Ic511c30c7354dbc0fdf6aabdd588e6a9e683e0a5 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 2 + hw_fence/src/hw_fence_drv_utils.c | 110 ++++++++++++++++++++++----- hw_fence/src/msm_hw_fence.c | 14 ++++ 3 files changed, 107 insertions(+), 19 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 3480f7a257..a6d83f3e7f 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -372,6 +372,7 @@ struct hw_fence_signal_cb { * @rm_nb: hyp resource manager notifier * @memparcel: memparcel for the allocated memory * @used_mem_size: total memory size of global table, lock region, and ctrl and client queues + * @cpu_addr_cookie: bogus cpu address returned by dma_alloc_attrs which is used for freeing memory * @db_label: doorbell label * @rx_dbl: handle to the Rx doorbell * @debugfs_data: debugfs info @@ -446,6 +447,7 @@ struct hw_fence_driver_data { struct notifier_block rm_nb; u32 memparcel; u32 used_mem_size; + void *cpu_addr_cookie; /* doorbell */ u32 db_label; diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index c5b871a22f..98d0562827 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -712,6 +712,7 @@ static int _register_vm_mem_with_hyp(struct hw_fence_driver_data *drv_data, static int _init_soccp_mem(struct hw_fence_driver_data *drv_data) { struct iommu_domain *domain; + u32 shbuf_soccp_va; int 
ret; if (!drv_data) { @@ -719,6 +720,17 @@ static int _init_soccp_mem(struct hw_fence_driver_data *drv_data) return -EINVAL; } + ret = of_property_read_u32(drv_data->dev->of_node, "shbuf_soccp_va", &shbuf_soccp_va); + if (ret || !shbuf_soccp_va) { + if (drv_data->cpu_addr_cookie) { + HWFNC_ERR("non-static mem allocation w/out soccp_va dt ret:%d val:%d\n", + ret, shbuf_soccp_va); + return -EINVAL; + } + /* use one to one memory mapping if virtual address is not in dt */ + shbuf_soccp_va = drv_data->res.start; + } + domain = iommu_get_domain_for_dev(drv_data->dev); if (IS_ERR_OR_NULL(domain)) { HWFNC_ERR("failed to get iommu domain for device ret:%ld\n", PTR_ERR(domain)); @@ -726,38 +738,37 @@ static int _init_soccp_mem(struct hw_fence_driver_data *drv_data) } #if (KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE) - ret = iommu_map(domain, drv_data->res.start, drv_data->res.start, drv_data->size, + ret = iommu_map(domain, shbuf_soccp_va, drv_data->res.start, drv_data->size, IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, GFP_KERNEL); #else - ret = iommu_map(domain, drv_data->res.start, drv_data->res.start, drv_data->size, + ret = iommu_map(domain, shbuf_soccp_va, drv_data->res.start, drv_data->size, IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE); #endif - if (ret) - HWFNC_ERR("failed to one-to-one map for soccp smmu addr:0x%llx sz:%lx ret:%d\n", - drv_data->res.start, drv_data->size, ret); - else + if (ret) { + HWFNC_ERR("failed to map for soccp smmu phys_addr:0x%llx va:0x%x sz:%lx ret:%d\n", + drv_data->res.start, shbuf_soccp_va, drv_data->size, ret); + } else { /* * HW Fence Driver resources may not be ready at this point (this is separately * tracked via resources_ready), but we assume soccp is ready once memory mapping * is done. 
*/ drv_data->fctl_ready = true; + HWFNC_DBG_INIT("mapped for soccp smmu phys_addr:0x%llx va:0x%x sz:%lx ret:%d\n", + drv_data->res.start, shbuf_soccp_va, drv_data->size, ret); + } return ret; } -/* Allocates carved-out mapped memory */ -int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *drv_data) +/* Allocates carved-out mapped memory from device-tree */ +static int _alloc_mem_static(struct hw_fence_driver_data *drv_data, struct device_node *node_compat) { - struct device_node *node = drv_data->dev->of_node; - struct device_node *node_compat; - const char *compat = "qcom,msm-hw-fence-mem"; struct device_node *np; int ret; - node_compat = of_find_compatible_node(node, NULL, compat); - if (!node_compat) { - HWFNC_ERR("Failed to find dev node with compat:%s\n", compat); + if (!drv_data || !node_compat) { + HWFNC_ERR("invalid drv_data:0x%pK node_compat:0x%pK\n", drv_data, node_compat); return -EINVAL; } @@ -774,6 +785,67 @@ int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *drv_data) return -EINVAL; } + return 0; +} + +/* Allocates memory dynamically */ +static int _alloc_mem_dynamic(struct hw_fence_driver_data *drv_data) +{ + u32 events_size, size; + + if (!drv_data || !drv_data->has_soccp) { + HWFNC_ERR("invalid drv_data:0x%pK has_soccp:%d\n", drv_data, + drv_data ? 
drv_data->has_soccp : -1); + return -EINVAL; + } + + events_size = HW_FENCE_MAX_EVENTS * sizeof(struct msm_hw_fence_event); + if (drv_data->used_mem_size >= U32_MAX - events_size) { + HWFNC_ERR("invalid used_mem_size:%u events_size:%u\n", drv_data->used_mem_size, + events_size); + return -EINVAL; + } + + size = PAGE_ALIGN(drv_data->used_mem_size + events_size); + drv_data->cpu_addr_cookie = dma_alloc_attrs(drv_data->dev, size, &drv_data->res.start, + GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING); + if (!drv_data->cpu_addr_cookie) { + HWFNC_ERR("memory allocation failed!\n"); + return -ENOMEM; + } + + drv_data->res.end = drv_data->res.start + size - 1; + drv_data->res.name = "hwfence_shbuf"; + HWFNC_DBG_INIT("allocated memory start:0x%llx end:0x%llx size:0x%x\n", drv_data->res.start, + drv_data->res.end, size); + + return 0; +} + +/* Allocates carved-out mapped memory */ +int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *drv_data) +{ + struct device_node *node = drv_data->dev->of_node; + struct device_node *node_compat; + const char *compat = "qcom,msm-hw-fence-mem"; + int ret; + + node_compat = of_find_compatible_node(node, NULL, compat); + if (!node_compat && !drv_data->has_soccp) { + HWFNC_ERR("Failed to find dev node with compat:%s\n", compat); + return -EINVAL; + } + + if (node_compat) + ret = _alloc_mem_static(drv_data, node_compat); + else + ret = _alloc_mem_dynamic(drv_data); + + if (ret) { + HWFNC_ERR("failed to allocate static or dynamic memory ret:%d\n", ret); + return ret; + } + if (drv_data->has_soccp) drv_data->io_mem_base = memremap(drv_data->res.start, resource_size(&drv_data->res), MEMREMAP_WB); @@ -792,19 +864,19 @@ int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *drv_data) return -ENOMEM; } - HWFNC_DBG_INIT("io_mem_base:0x%pK start:0x%llx end:0x%llx sz:0x%lx name:%s has_soccp:%s\n", - drv_data->io_mem_base, drv_data->res.start, drv_data->res.end, drv_data->size, - drv_data->res.name, drv_data->has_soccp ? 
"true" : "false"); - memset_io(drv_data->io_mem_base, 0x0, drv_data->size); + HWFNC_DBG_INIT("va:0x%pK start:0x%llx sz:0x%lx name:%s cookie:0x%pK has_soccp:%s\n", + drv_data->io_mem_base, drv_data->res.start, drv_data->size, drv_data->res.name, + drv_data->cpu_addr_cookie, drv_data->has_soccp ? "true" : "false"); + if (drv_data->has_soccp) ret = _init_soccp_mem(drv_data); else ret = _register_vm_mem_with_hyp(drv_data, node_compat); if (ret) - HWFNC_ERR("failed to share memory with %s va:0x%pK pa:0x%llx sz:0x%lx name:%s\n", + HWFNC_ERR("failed to share mem with %s cpu_va:0x%pK pa:0x%llx sz:0x%lx name:%s\n", drv_data->has_soccp ? "soccp" : "vm", drv_data->io_mem_base, drv_data->res.start, drv_data->size, drv_data->res.name); diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 21d7369b09..0758e08ef6 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -837,6 +837,12 @@ static int msm_hw_fence_probe_init(struct platform_device *pdev) error: dev_set_drvdata(&pdev->dev, NULL); + kfree(hw_fence_drv_data->ipc_clients_table); + kfree(hw_fence_drv_data->hw_fence_client_queue_size); + if (hw_fence_drv_data->cpu_addr_cookie) + dma_free_attrs(hw_fence_drv_data->dev, hw_fence_drv_data->size, + hw_fence_drv_data->cpu_addr_cookie, hw_fence_drv_data->res.start, + DMA_ATTR_NO_KERNEL_MAPPING); kfree(hw_fence_drv_data); hw_fence_drv_data = (void *) -EPROBE_DEFER; @@ -892,6 +898,14 @@ static int msm_hw_fence_remove(struct platform_device *pdev) kthread_stop(hw_fence_drv_data->soccp_listener_thread); dev_set_drvdata(&pdev->dev, NULL); + + /* free memory allocations as part of hw_fence_drv_data */ + kfree(hw_fence_drv_data->ipc_clients_table); + kfree(hw_fence_drv_data->hw_fence_client_queue_size); + if (hw_fence_drv_data->cpu_addr_cookie) + dma_free_attrs(hw_fence_drv_data->dev, hw_fence_drv_data->size, + hw_fence_drv_data->cpu_addr_cookie, hw_fence_drv_data->res.start, + DMA_ATTR_NO_KERNEL_MAPPING); kfree(hw_fence_drv_data); 
hw_fence_drv_data = (void *) -EPROBE_DEFER; From 077c953f7d33b4409d72f2645a6878490998e1a1 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 7 May 2024 16:57:11 -0700 Subject: [PATCH 139/166] mm-drivers: hw_fence: add kconfig to enable hw-fencing by default Add target-based Kconfig to enable hw-fencing by default. When hw-fencing is enabled by default, hw-fencing can be disabled or re-enabled at runtime through: "fastboot oem set-hw-fence-value [0 or 1]". Change-Id: I36838a8a7cf710373ae8e819bd1b3cee79c84356 Signed-off-by: Grace An --- hw_fence/Kconfig | 9 +++++++++ hw_fence/src/msm_hw_fence.c | 4 ++++ 2 files changed, 13 insertions(+) diff --git a/hw_fence/Kconfig b/hw_fence/Kconfig index 75205f2a59..bcf3546845 100644 --- a/hw_fence/Kconfig +++ b/hw_fence/Kconfig @@ -11,3 +11,12 @@ config QTI_HW_FENCE_USE_SYNX hw-fence ops with synx module to support hw- fencing through synx api and inter-op functionality between synx and hw-fence. + +config QTI_ENABLE_HW_FENCE_DEFAULT + bool "HW Fence is enabled by default" + help + Enable the hw_fence module by default. + This config allow hw-fence client registrations + by default without any fastboot commands. + HW-fencing can still be disabled and reenabled + at runtime through fastboot commands. 
diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 21d7369b09..4e7a817048 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -21,7 +21,11 @@ #include "hw_fence_drv_fence.h" struct hw_fence_driver_data *hw_fence_drv_data; +#if IS_ENABLED(CONFIG_QTI_ENABLE_HW_FENCE_DEFAULT) +bool hw_fence_driver_enable = true; +#else bool hw_fence_driver_enable; +#endif static int _set_power_vote_if_needed(struct hw_fence_driver_data *drv_data, u32 client_id, bool state) From 7b3ffd78d1a46906c44fa8612ac4b815fb71f0df Mon Sep 17 00:00:00 2001 From: Grace An Date: Mon, 29 Apr 2024 17:06:43 -0700 Subject: [PATCH 140/166] mm-drivers: hw_fence: enable hw-fencing by default on niobe target This change will enable hw-fencing by default on niobe target. Change-Id: I4d9595d837d06b08150a63694f1e0107c05f2b57 Signed-off-by: Grace An --- hw_fence/niobe_defconfig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hw_fence/niobe_defconfig b/hw_fence/niobe_defconfig index 5b903c60cd..ba00c1a470 100644 --- a/hw_fence/niobe_defconfig +++ b/hw_fence/niobe_defconfig @@ -1,2 +1,3 @@ CONFIG_QTI_HW_FENCE=y -CONFIG_QTI_HW_FENCE_USE_SYNX=y \ No newline at end of file +CONFIG_QTI_HW_FENCE_USE_SYNX=y +CONFIG_QTI_ENABLE_HW_FENCE_DEFAULT=y \ No newline at end of file From a7df8b84bb86f43857d01d2b970e1d13a1f3d8f4 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 19 Mar 2024 15:10:31 -0700 Subject: [PATCH 141/166] mm-drivers: hw_fence: enable hw-fencing by default on sun target Add target-based Kconfig to enable hw-fencing by default and enable hw-fencing by default on sun target. When hw-fencing is enabled by default, hw-fencing can be disabled or re-enabled at runtime through: "fastboot oem set-hw-fence-value [0 or 1]". 
Change-Id: Ic9987ce0a0c006c845895be48f80280666795624 Signed-off-by: Grace An --- hw_fence/sun_defconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/hw_fence/sun_defconfig b/hw_fence/sun_defconfig index b39eb5efbe..ba00c1a470 100644 --- a/hw_fence/sun_defconfig +++ b/hw_fence/sun_defconfig @@ -1,2 +1,3 @@ CONFIG_QTI_HW_FENCE=y CONFIG_QTI_HW_FENCE_USE_SYNX=y +CONFIG_QTI_ENABLE_HW_FENCE_DEFAULT=y \ No newline at end of file From 67afe8033e1c6b51f0fe59995589e2061d18e7c4 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 14 May 2024 09:46:42 -0700 Subject: [PATCH 142/166] mm-drivers: hw_fence: fix issue in ipa synx_initialize Currently, IPA client initialization fails if one client queue is specified in device-tree. Support disabling updates_rxq properties for clients specified to have only one client queue. Change-Id: I7e1f82dc3c7352084a1e57a08d4c07d1a0b58195 Signed-off-by: Grace An --- hw_fence/src/msm_hw_fence.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 4e7a817048..3b3b57a87b 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -143,16 +143,16 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, hw_fence_ipcc_txq_update_needs_ipc_irq(hw_fence_drv_data, client_id); hw_fence_client->queues_num = hw_fence_utils_get_queues_num(hw_fence_drv_data, client_id); - if (!hw_fence_client->queues_num || (hw_fence_client->update_rxq && - hw_fence_client->queues_num < HW_FENCE_CLIENT_QUEUES) || - (!hw_fence_client->update_rxq && hw_fence_client->signaled_update_rxq)) { - HWFNC_ERR("client:%d invalid q_num:%d for updates_rxq:%s signaled_update_rxq:%s\n", - client_id, hw_fence_client->queues_num, - hw_fence_client->update_rxq ? "true" : "false", - hw_fence_client->signaled_update_rxq ? 
"true" : "false"); + if (!hw_fence_client->queues_num) { + HWFNC_ERR("client:%d invalid q_num:%d\n", client_id, hw_fence_client->queues_num); ret = -EINVAL; goto error; } + if (hw_fence_client->queues_num < HW_FENCE_CLIENT_QUEUES) { + hw_fence_client->update_rxq = false; + hw_fence_client->signaled_update_rxq = false; + } + hw_fence_client->skip_fctl_ref = hw_fence_utils_get_skip_fctl_ref(hw_fence_drv_data, client_id); @@ -183,6 +183,12 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, hw_fence_client->ipc_signal_id, hw_fence_client->ipc_client_vid, hw_fence_client->ipc_client_pid); + HWFNC_DBG_INIT("update_rxq:%s signaled update_rxq:%s send_ipc:%s txq_update_send_ipc:%s\n", + hw_fence_client->update_rxq ? "true" : "false", + hw_fence_client->signaled_update_rxq ? "true" : "false", + hw_fence_client->signaled_send_ipc ? "true" : "false", + hw_fence_client->txq_update_send_ipc ? "true" : "false"); + #if IS_ENABLED(CONFIG_DEBUG_FS) init_waitqueue_head(&hw_fence_client->wait_queue); #endif /* CONFIG_DEBUG_FS */ From 0da301ff8653ce678fd87d22cf4a960c4f9c5e6c Mon Sep 17 00:00:00 2001 From: Grace An Date: Thu, 6 Jun 2024 09:59:06 -0700 Subject: [PATCH 143/166] mm-drivers: hw_fence: use kref_get_unless_zero to avoid race Use kref_get_unless_zero instead of kref_get for hw-fence clients to avoid race condition with msm_hw_fence_deregister called concurrently where kref_put will decrease the refcount to zero and free the client, but kref_get still increases, and we get a client that is used after free. 
Change-Id: I8123726d1c7d11fef25acb532da748b2e6e36f83 Signed-off-by: Grace An --- hw_fence/src/msm_hw_fence.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 3b3b57a87b..4a988ae675 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -90,8 +90,8 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, /* Avoid race condition if multiple-threads request same client at same time */ mutex_lock(&hw_fence_drv_data->clients_register_lock); - if (hw_fence_drv_data->clients[client_id]) { - kref_get(&hw_fence_drv_data->clients[client_id]->kref); + if (hw_fence_drv_data->clients[client_id] && + kref_get_unless_zero(&hw_fence_drv_data->clients[client_id]->kref)) { mutex_unlock(&hw_fence_drv_data->clients_register_lock); HWFNC_DBG_INIT("client with id %d already registered\n", client_id); kfree(hw_fence_client); From b94fd402b6e3c66249f2027882b613ccc1f596f4 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 7 Nov 2023 15:09:21 -0800 Subject: [PATCH 144/166] mm-drivers: hw_fence: add support to read and write ctrl queue Add support for HW Fence Driver to read and write to ctrl rx and tx queues respectively. 
Change-Id: I31eb0e30cb96892f90dbd5d75606d6ae5bd9f4b8 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 3 + hw_fence/src/hw_fence_drv_priv.c | 61 ++++++++++--------- hw_fence/src/hw_fence_drv_utils.c | 90 ++++++++++++++++------------ 3 files changed, 88 insertions(+), 66 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index a6d83f3e7f..4cf0b2adae 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -625,6 +625,9 @@ int hw_fence_process_fence(struct hw_fence_driver_data *drv_data, int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, u64 seqno, u64 hash, u64 flags, u64 client_data, u32 error, int queue_type); +int hw_fence_update_queue_helper(struct hw_fence_driver_data *drv_data, u32 client_id, + struct msm_hw_fence_queue *queue, u16 type, u64 ctxt_id, u64 seqno, u64 hash, u64 flags, + u64 client_data, u32 error, int queue_type); int hw_fence_update_existing_txq_payload(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 hash, u32 error); inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data); diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 7c0ea56b9b..3112ebdfba 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -359,15 +359,10 @@ static int _get_update_queue_params(struct hw_fence_driver_data *drv_data, return 0; } -/* - * This function writes to the queue of the client. 
The 'queue_type' determines - * if this function is writing to the rx or tx queue - */ -int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, - struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, u64 seqno, u64 hash, - u64 flags, u64 client_data, u32 error, int queue_type) +int hw_fence_update_queue_helper(struct hw_fence_driver_data *drv_data, u32 client_id, + struct msm_hw_fence_queue *queue, u16 type, u64 ctxt_id, u64 seqno, u64 hash, u64 flags, + u64 client_data, u32 error, int queue_type) { - struct msm_hw_fence_queue *queue; u32 read_idx; u32 write_idx; u32 to_write_idx; @@ -382,17 +377,9 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, u32 *rd_idx_ptr, *wr_ptr; int ret = 0; - if (queue_type >= hw_fence_client->queues_num) { - HWFNC_ERR("Invalid queue type:%d client_id:%d q_num:%d\n", queue_type, - hw_fence_client->client_id, hw_fence_client->queues_num); - return -EINVAL; - } - - queue = &hw_fence_client->queues[queue_type]; if (_get_update_queue_params(drv_data, queue, &q_size_u32, &payload_size, &payload_size_u32, &rd_idx_ptr, &wr_ptr)) { - HWFNC_ERR("Invalid client:%d q_type:%d queue\n", hw_fence_client->client_id, - queue_type); + HWFNC_ERR("Invalid client:%d q_type:%d queue\n", client_id, queue_type); return -EINVAL; } @@ -403,15 +390,14 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, */ lock_client = _lock_client_queue(queue_type); if (lock_client) { - lock_idx = (hw_fence_client->client_id - 1) * HW_FENCE_LOCK_IDX_OFFSET; + lock_idx = (client_id - 1) * HW_FENCE_LOCK_IDX_OFFSET; if (lock_idx >= drv_data->client_lock_tbl_cnt) { HWFNC_ERR("can't reset rxq, lock for client:%d lock_idx:%d exceed max:%d\n", - hw_fence_client->client_id, lock_idx, - drv_data->client_lock_tbl_cnt); + client_id, lock_idx, drv_data->client_lock_tbl_cnt); return -EINVAL; } - HWFNC_DBG_Q("Locking client id:%d: idx:%d\n", hw_fence_client->client_id, lock_idx); + HWFNC_DBG_Q("Locking client id:%d: idx:%d\n", client_id, 
lock_idx); /* lock the client rx queue to update */ GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 1); /* lock */ @@ -425,8 +411,8 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, write_idx = readl_relaxed(wr_ptr); HWFNC_DBG_Q("wr client:%d r_ptr:0x%pK w_ptr:0x%pK r_idx:%d w_idx:%d q:0x%pK type:%d s:%s\n", - hw_fence_client->client_id, rd_idx_ptr, wr_ptr, read_idx, write_idx, queue, - queue_type, queue->skip_wr_idx ? "true" : "false"); + client_id, rd_idx_ptr, wr_ptr, read_idx, write_idx, queue, queue_type, + queue->skip_wr_idx ? "true" : "false"); /* translate read and write indexes from custom indexing to dwords with no offset */ _translate_queue_indexes_custom_to_default(queue, &read_idx, &write_idx); @@ -452,9 +438,8 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, HWFNC_DBG_Q("to_write_idx:%u write_idx:%u payload_size:%u\n", to_write_idx, write_idx, payload_size_u32); - HWFNC_DBG_L("client_id:%d update %s hash:%llu ctx_id:%llu seqno:%llu flags:%llu error:%u\n", - hw_fence_client->client_id, _get_queue_type(queue_type), - hash, ctxt_id, seqno, flags, error); + HWFNC_DBG_L("client_id:%d update %s type:%u hash:%llu ctx:%llu seq:%llu flags:%llu e:%u\n", + client_id, _get_queue_type(queue_type), type, hash, ctxt_id, seqno, flags, error); /* * wrap-around case, here we are writing to the last element of the queue, therefore @@ -473,7 +458,7 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, /* Update Client Queue */ writeq_relaxed(payload_size, &write_ptr_payload->size); - writew_relaxed(HW_FENCE_PAYLOAD_TYPE_1, &write_ptr_payload->type); + writew_relaxed(type, &write_ptr_payload->type); writew_relaxed(HW_FENCE_PAYLOAD_REV(1, 0), &write_ptr_payload->version); writeq_relaxed(ctxt_id, &write_ptr_payload->ctxt_id); writeq_relaxed(seqno, &write_ptr_payload->seqno); @@ -501,6 +486,28 @@ exit: return ret; } +/* + * This function writes to the queue of the client. 
The 'queue_type' determines + * if this function is writing to the rx or tx queue + */ +int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, u64 seqno, u64 hash, + u64 flags, u64 client_data, u32 error, int queue_type) +{ + struct msm_hw_fence_queue *queue; + + if (queue_type >= hw_fence_client->queues_num) { + HWFNC_ERR("Invalid queue type:%d client_id:%d q_num:%d\n", queue_type, + hw_fence_client->client_id, hw_fence_client->queues_num); + return -EINVAL; + } + queue = &hw_fence_client->queues[queue_type]; + + return hw_fence_update_queue_helper(drv_data, hw_fence_client->client_id, queue, + HW_FENCE_PAYLOAD_TYPE_1, ctxt_id, seqno, hash, flags, client_data, error, + queue_type); +} + int hw_fence_update_existing_txq_payload(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 hash, u32 error) { diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 98d0562827..4a4ed44906 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -300,55 +300,67 @@ exit: return ret; } -static int _process_fence_error_client_loopback(struct hw_fence_driver_data *drv_data, - int db_flag_id) +static int _process_fence_error_payload(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_queue_payload *payload) { struct msm_hw_fence_client *hw_fence_client; - struct msm_hw_fence_queue_payload payload; - int i, cb_ret, ret = 0, read = 1; u32 client_id; + int ret; + + if (!drv_data || !payload || payload->type != HW_FENCE_PAYLOAD_TYPE_2) { + HWFNC_ERR("invalid drv_data:0x%pK payload:0x%pK type:%d expected type:%d\n", + drv_data, payload, payload ? 
payload->type : -1, HW_FENCE_PAYLOAD_TYPE_2); + return -EINVAL; + } + + if (payload->client_data < HW_FENCE_CLIENT_ID_CTX0 || + payload->client_data >= drv_data->clients_num) { + HWFNC_ERR("read invalid client_id:%llu from ctrl rxq min:%u max:%u\n", + payload->client_data, HW_FENCE_CLIENT_ID_CTX0, + drv_data->clients_num); + return -EINVAL; + } + + client_id = payload->client_data; + HWFNC_DBG_Q("ctrl rxq rd: h:%llu ctx:%llu seq:%llu f:%llu e:%u client:%u\n", payload->hash, + payload->ctxt_id, payload->seqno, payload->flags, payload->error, client_id); + + hw_fence_client = drv_data->clients[client_id]; + if (!hw_fence_client) { + HWFNC_ERR("processing fence error cb for unregistered client_id:%u\n", + client_id); + return -EINVAL; + } + + ret = hw_fence_utils_fence_error_cb(hw_fence_client, payload->ctxt_id, + payload->seqno, payload->hash, payload->flags, payload->error); + if (ret) + HWFNC_ERR("fence_error_cb failed for client:%u ctx:%llu seq:%llu err:%u\n", + client_id, payload->ctxt_id, payload->seqno, payload->error); + + return ret; +} + +static int _process_ctrl_rx_queue(struct hw_fence_driver_data *drv_data) +{ + struct msm_hw_fence_queue_payload payload; + int i, ret = 0, read = 1; for (i = 0; read && i < HW_FENCE_MAX_ITER_READ; i++) { read = hw_fence_read_queue_helper(drv_data, &drv_data->ctrl_queues[HW_FENCE_RX_QUEUE - 1], &payload); if (read < 0) { - HWFNC_DBG_Q("unable to read ctrl rxq for db_flag_id:%d\n", db_flag_id); + HWFNC_DBG_Q("unable to read ctrl rxq\n"); return read; } - if (payload.type != HW_FENCE_PAYLOAD_TYPE_2) { - HWFNC_ERR("unsupported payload type in ctrl rxq received:%u expected:%u\n", - payload.type, HW_FENCE_PAYLOAD_TYPE_2); + switch (payload.type) { + case HW_FENCE_PAYLOAD_TYPE_2: + ret = _process_fence_error_payload(drv_data, &payload); + break; + default: + HWFNC_ERR("received unexpected ctrl queue payload type:%d\n", payload.type); ret = -EINVAL; - continue; - } - if (payload.client_data < HW_FENCE_CLIENT_ID_CTX0 || - 
payload.client_data >= drv_data->clients_num) { - HWFNC_ERR("read invalid client_id:%llu from ctrl rxq min:%u max:%u\n", - payload.client_data, HW_FENCE_CLIENT_ID_CTX0, - drv_data->clients_num); - ret = -EINVAL; - continue; - } - - client_id = payload.client_data; - HWFNC_DBG_Q("ctrl rxq rd: it:%d h:%llu ctx:%llu seq:%llu f:%llu e:%u client:%u\n", - i, payload.hash, payload.ctxt_id, payload.seqno, payload.flags, - payload.error, client_id); - - hw_fence_client = drv_data->clients[client_id]; - if (!hw_fence_client) { - HWFNC_ERR("processing fence error cb for unregistered client_id:%u\n", - client_id); - ret = -EINVAL; - continue; - } - - cb_ret = hw_fence_utils_fence_error_cb(hw_fence_client, payload.ctxt_id, - payload.seqno, payload.hash, payload.flags, payload.error); - if (cb_ret) { - HWFNC_ERR("fence_error_cb failed for client:%u ctx:%llu seq:%llu err:%u\n", - client_id, payload.ctxt_id, payload.seqno, payload.error); - ret = cb_ret; + break; } } @@ -362,7 +374,7 @@ static int _process_signaled_client_id(struct hw_fence_driver_data *drv_data, in HWFNC_DBG_H("Processing signaled client mask id:%d\n", client_id); switch (client_id) { case HW_FENCE_CLIENT_ID_CTRL_QUEUE: - ret = _process_fence_error_client_loopback(drv_data, client_id); + ret = _process_ctrl_rx_queue(drv_data); break; #if IS_ENABLED(CONFIG_DEBUG_FS) case HW_FENCE_CLIENT_ID_VAL0: From 5513bf907e906453c87eeb5077500d6fb3a2b676 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 7 Nov 2023 16:23:46 -0800 Subject: [PATCH 145/166] mm-drivers: hw_fence: inform soccp of memory mapping via ctrl queue This changes adds ctrl queue read/write as part of initialization process as follows: As part of probe, the HW Fence Driver maps the memory and then writes to the ctrl tx queue and sends an IPCC signal to SOCCP. When the HW Fence Driver receives the acknowledgment message from SOCCP through the ctrl rx queue, the HW Fence Driver internally sets the variable that allows clients to start fence creation. 
Change-Id: I4421ce56c1a741ccd6ea549364ccccebe7892f88 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 4 +- hw_fence/src/hw_fence_drv_utils.c | 96 ++++++++++++++++++++++------ 2 files changed, 80 insertions(+), 20 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 8f20beda2d..36ba5822a0 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -177,10 +177,12 @@ struct msm_hw_fence_queue { * enum payload_type - Enum with the queue payload types. * HW_FENCE_PAYLOAD_TYPE_1: client queue payload * HW_FENCE_PAYLOAD_TYPE_2: ctrl queue payload for fence error; client_data stores client_id + * HW_FENCE_PAYLOAD_TYPE_3: ctrl queue payload for memory sharing */ enum payload_type { HW_FENCE_PAYLOAD_TYPE_1 = 1, - HW_FENCE_PAYLOAD_TYPE_2 + HW_FENCE_PAYLOAD_TYPE_2, + HW_FENCE_PAYLOAD_TYPE_3 }; /** diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 7840c524cd..af42817918 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -345,6 +345,29 @@ static int _process_fence_error_payload(struct hw_fence_driver_data *drv_data, return ret; } +static int _process_init_soccp_payload(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_queue_payload *payload) +{ + int ret; + + if (!drv_data || !drv_data->has_soccp || !payload || + payload->type != HW_FENCE_PAYLOAD_TYPE_3) { + HWFNC_ERR("invalid drv_data:0x%pK has_soccp:%d payload:0x%pK type:%d expected:%d\n", + drv_data, drv_data ? drv_data->has_soccp : -1, payload, + payload ? 
payload->type : -1, HW_FENCE_PAYLOAD_TYPE_3); + return -EINVAL; + } + + HWFNC_DBG_INIT("Received ctrlq msg that soccp is initialized\n"); + drv_data->fctl_ready = true; + + ret = hw_fence_utils_set_power_vote(drv_data, false); + if (ret) + HWFNC_ERR("failed to remove power vote used to send ctrl queue message\n"); + + return ret; +} + static int _process_ctrl_rx_queue(struct hw_fence_driver_data *drv_data) { struct msm_hw_fence_queue_payload payload; @@ -361,6 +384,9 @@ static int _process_ctrl_rx_queue(struct hw_fence_driver_data *drv_data) case HW_FENCE_PAYLOAD_TYPE_2: ret = _process_fence_error_payload(drv_data, &payload); break; + case HW_FENCE_PAYLOAD_TYPE_3: + ret = _process_init_soccp_payload(drv_data, &payload); + break; default: HWFNC_ERR("received unexpected ctrl queue payload type:%d\n", payload.type); ret = -EINVAL; @@ -507,6 +533,41 @@ static int hw_fence_soccp_listener(void *data) return 0; } +static int _send_ctrl_txq_msg(struct hw_fence_driver_data *drv_data, u32 payload_type) +{ + struct msm_hw_fence_queue *queue; + int ret; + + ret = hw_fence_utils_set_power_vote(drv_data, true); + if (ret) { + HWFNC_ERR("failed to set power vote to send ctrlq message ret:%d\n", ret); + return -EINVAL; + } + + /* soccp may fail to wake up during hw-fence driver probe */ + if (!drv_data->soccp_props.is_awake) { + HWFNC_DBG_INFO("rproc_set_state call failed to wake up soccp\n"); + ret = hw_fence_utils_set_power_vote(drv_data, false); + if (ret) + HWFNC_ERR("failed to remove power vote for ctrlq msg ret:%d\n", ret); + + return -EINVAL; + } + + queue = &drv_data->ctrl_queues[HW_FENCE_TX_QUEUE - 1]; + ret = hw_fence_update_queue_helper(drv_data, 0, queue, payload_type, 0, 0, 0, + 0, 0, 0, HW_FENCE_TX_QUEUE - 1); + if (ret) { + HWFNC_ERR("unable to update ctrl txq message\n"); + return ret; + } + + hw_fence_ipcc_trigger_signal(drv_data, drv_data->ipcc_client_pid, drv_data->ipcc_fctl_vid, + hw_fence_ipcc_get_signal_id(drv_data, 0)); + + return ret; +} + int 
hw_fence_utils_init_soccp_irq(struct hw_fence_driver_data *drv_data) { struct platform_device *pdev; @@ -613,13 +674,7 @@ int hw_fence_utils_set_power_vote(struct hw_fence_driver_data *drv_data, bool st return 0; /* do not expose failures of power vote to client */ } #else -static int _set_intended_soccp_state(struct hw_fence_soccp *soccp_props) -{ - HWFNC_ERR("Kernel version does not support SOCCP power votes\n"); - return -EINVAL; -} - -int hw_fence_utils_set_power_vote(struct hw_fence_soccp *soccp_props, bool state) +int hw_fence_utils_set_power_vote(struct hw_fence_driver_data *drv_data, bool state) { HWFNC_ERR("Kernel version does not support SOCCP power votes\n"); return -EINVAL; @@ -628,7 +683,7 @@ int hw_fence_utils_set_power_vote(struct hw_fence_soccp *soccp_props, bool state static int _set_soccp_rproc(struct hw_fence_soccp *soccp_props, phandle ph) { - int ret; + int ret = 0; mutex_lock(&soccp_props->rproc_lock); if (IS_ERR_OR_NULL(soccp_props->rproc)) @@ -638,8 +693,6 @@ static int _set_soccp_rproc(struct hw_fence_soccp *soccp_props, phandle ph) if (!ret) ret = -EINVAL; soccp_props->rproc = NULL; - } else { - ret = _set_intended_soccp_state(soccp_props); } mutex_unlock(&soccp_props->rproc_lock); @@ -649,6 +702,8 @@ static int _set_soccp_rproc(struct hw_fence_soccp *soccp_props, phandle ph) static int hw_fence_notify_ssr(struct notifier_block *nb, unsigned long action, void *data) { struct hw_fence_soccp *soccp_props = container_of(nb, struct hw_fence_soccp, ssr_nb); + struct hw_fence_driver_data *drv_data = container_of(soccp_props, + struct hw_fence_driver_data, soccp_props); struct qcom_ssr_notify_data *notify_data = data; int ret; @@ -664,6 +719,10 @@ static int hw_fence_notify_ssr(struct notifier_block *nb, unsigned long action, HWFNC_ERR("failed getting soccp_rproc:0x%pK ph:%d usage_cnt:0x%x ret:%d\n", soccp_props->rproc, soccp_props->rproc_ph, refcount_read(&soccp_props->usage_cnt), ret); + /* inform soccp of ctrl queue updates once it is up; this 
will set a power vote */ + ret = _send_ctrl_txq_msg(drv_data, HW_FENCE_PAYLOAD_TYPE_3); + if (ret) + HWFNC_ERR("failed to send ctrlq message for bootup event\n"); break; case QCOM_SSR_BEFORE_SHUTDOWN: HWFNC_DBG_SSR("received soccp %s event\n", notify_data->crashed ? "crashed" : @@ -690,6 +749,7 @@ int hw_fence_utils_register_soccp_ssr_notifier(struct hw_fence_driver_data *drv_ { void *notifier; struct hw_fence_soccp *soccp_props; + int ret; if (!drv_data || !drv_data->has_soccp) { HWFNC_ERR("invalid drv_data:0x%pK has_soccp:%d\n", drv_data, @@ -710,6 +770,11 @@ int hw_fence_utils_register_soccp_ssr_notifier(struct hw_fence_driver_data *drv_ soccp_props->ssr_notifier = notifier; HWFNC_DBG_SSR("registered for soccp ssr notification notifier:0x%pK\n", notifier); + /* if soccp is already up, do initial bootup here; this first attempt may fail */ + ret = _send_ctrl_txq_msg(drv_data, HW_FENCE_PAYLOAD_TYPE_3); + if (ret) + HWFNC_DBG_INFO("can't send ctrl tx queue msg to inform soccp of mem map\n"); + return 0; } @@ -926,19 +991,12 @@ static int _init_soccp_mem(struct hw_fence_driver_data *drv_data) ret = iommu_map(domain, shbuf_soccp_va, drv_data->res.start, drv_data->size, IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE); #endif - if (ret) { + if (ret) HWFNC_ERR("failed to map for soccp smmu phys_addr:0x%llx va:0x%x sz:%lx ret:%d\n", drv_data->res.start, shbuf_soccp_va, drv_data->size, ret); - } else { - /* - * HW Fence Driver resources may not be ready at this point (this is separately - * tracked via resources_ready), but we assume soccp is ready once memory mapping - * is done. 
- */ - drv_data->fctl_ready = true; + else HWFNC_DBG_INIT("mapped for soccp smmu phys_addr:0x%llx va:0x%x sz:%lx ret:%d\n", drv_data->res.start, shbuf_soccp_va, drv_data->size, ret); - } return ret; } From 1fcc80dd3979f4ef913a544c1a43807aa6c633f8 Mon Sep 17 00:00:00 2001 From: Grace An Date: Fri, 19 Apr 2024 16:58:19 -0700 Subject: [PATCH 146/166] mm-drivers: hw_fence: register for soccp ssr notification Add functionality to register for soccp ssr notification. Since rproc struct changes following ssr, call rproc_get_by_phandle again after notification that soccp is after power up and set power vote accordingly based on usage count. Change-Id: Idef3057ed9d84e8f201c4f22406a6ec8a8ec319a Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_debug.h | 4 + hw_fence/include/hw_fence_drv_priv.h | 26 +++- hw_fence/include/hw_fence_drv_utils.h | 18 +++ hw_fence/src/hw_fence_drv_priv.c | 8 ++ hw_fence/src/hw_fence_drv_utils.c | 182 +++++++++++++++++++++++++- hw_fence/src/msm_hw_fence.c | 14 +- 6 files changed, 240 insertions(+), 12 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_debug.h b/hw_fence/include/hw_fence_drv_debug.h index dd3856d3e8..3979554840 100644 --- a/hw_fence/include/hw_fence_drv_debug.h +++ b/hw_fence/include/hw_fence_drv_debug.h @@ -17,6 +17,7 @@ enum hw_fence_drv_prio { HW_FENCE_LUT = 0x000020, /* Look-up and algorithm logs */ HW_FENCE_IRQ = 0x000040, /* Interrupt-related messages */ HW_FENCE_LOCK = 0x000080, /* Lock-related messages */ + HW_FENCE_SSR = 0x0000100, /* SSR-related messages */ HW_FENCE_PRINTK = 0x010000, }; @@ -62,6 +63,9 @@ extern u32 msm_hw_fence_debug_level; #define HWFNC_DBG_LOCK(fmt, ...) \ dprintk(HW_FENCE_LOCK, "[hwfence_dbglock:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__) +#define HWFNC_DBG_SSR(fmt, ...) \ + dprintk(HW_FENCE_SSR, "[hwfence_dbgssr:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__) + #define HWFNC_DBG_DUMP(prio, fmt, ...) 
\ dprintk(prio, "[hwfence_dbgd:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 4cf0b2adae..8f20beda2d 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -340,6 +340,28 @@ struct hw_fence_signal_cb { u64 hash; }; +/** + * struct hw_fence_soccp - Structure holding hw-fence data specific to soccp + * @rproc_ph: phandle for soccp rproc object used to set power vote + * @rproc: soccp rproc object used to set power vote + * @rproc_lock: lock to synchronization modifications to soccp rproc data structure and state + * @is_awake: true if HW Fence Driver has successfully set a power vote on soccp that has not been + * removed by SSR; false if soccp has not set a power vote, successfully removed its power vote, + * or soccp has crashed + * @usage_cnt: independent counter of number of users of SOCCP, 1 if no one is using + * @ssr_nb: notifier block used for soccp ssr + * @ssr_notifier: soccp ssr notifier + */ +struct hw_fence_soccp { + phandle rproc_ph; + struct rproc *rproc; + struct mutex rproc_lock; + bool is_awake; + refcount_t usage_cnt; + struct notifier_block ssr_nb; + void *ssr_notifier; +}; + /** * struct hw_fence_driver_data - Structure holding internal hw-fence driver data * @@ -396,11 +418,11 @@ struct hw_fence_signal_cb { * @ipcc_val_initialized: flag to indicate if val is initialized * @dma_fence_table_lock: lock to synchronize access to dma-fence table * @dma_fence_table: table with internal dma-fences for hw-fences - * @soccp_rproc: soccp rproc object used to set power vote * @has_soccp: flag to indicate if soccp is present (otherwise vm is used) * @soccp_listener_thread: thread that processes interrupts received from soccp * @soccp_wait_queue: wait queue to notify soccp_listener_thread of new interrupts * @signaled_clients_mask: mask to track signals received from soccp by hw-fence driver + * @soccp_props: soccp-specific 
properties for ssr and power votes */ struct hw_fence_driver_data { @@ -495,11 +517,11 @@ struct hw_fence_driver_data { DECLARE_HASHTABLE(dma_fence_table, DMA_FENCE_HASH_TABLE_BIT); /* soccp is present */ - struct rproc *soccp_rproc; bool has_soccp; struct task_struct *soccp_listener_thread; wait_queue_head_t soccp_wait_queue; atomic_t signaled_clients_mask; + struct hw_fence_soccp soccp_props; }; /** diff --git a/hw_fence/include/hw_fence_drv_utils.h b/hw_fence/include/hw_fence_drv_utils.h index fb13db57dd..f48db2affb 100644 --- a/hw_fence/include/hw_fence_drv_utils.h +++ b/hw_fence/include/hw_fence_drv_utils.h @@ -64,6 +64,14 @@ int hw_fence_utils_init_virq(struct hw_fence_driver_data *drv_data); */ int hw_fence_utils_init_soccp_irq(struct hw_fence_driver_data *drv_data); +/** + * hw_fence_utils_register_soccp_ssr_notifier() - registers rproc ssr notifier for soccp + * @drv_data: hw fence driver data + * + * Returns zero if success, otherwise returns negative error code. + */ +int hw_fence_utils_register_soccp_ssr_notifier(struct hw_fence_driver_data *drv_data); + /** * hw_fence_utils_process_signaled_clients_mask() - Process the mask containing HW Fence client IDs * that HW Fence Driver is responsible for, i.e. @@ -185,4 +193,14 @@ int hw_fence_utils_get_queues_num(struct hw_fence_driver_data *drv_data, int cli */ int hw_fence_utils_get_skip_fctl_ref(struct hw_fence_driver_data *drv_data, int client_id); + +/** + * hw_fence_utils_set_power_vote() - Sets the power vote for soccp. 
+ * + * @drv_data: driver data + * @state: power state to set + * + * Returns: 0 if successful, error if not + */ +int hw_fence_utils_set_power_vote(struct hw_fence_driver_data *drv_data, bool state); #endif /* __HW_FENCE_DRV_UTILS_H */ diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 3112ebdfba..6fe013c73f 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -771,6 +771,14 @@ int hw_fence_init(struct hw_fence_driver_data *drv_data) goto exit; } + if (drv_data->has_soccp) { + ret = hw_fence_utils_register_soccp_ssr_notifier(drv_data); + if (ret) { + HWFNC_ERR("failed to register for soccp ssr notification\n"); + goto exit; + } + } + hw_fence_dma_fence_init_hash_table(drv_data); mem = drv_data->io_mem_base; diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 4a4ed44906..7840c524cd 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -21,6 +21,10 @@ #include #include #include +#if (KERNEL_VERSION(6, 1, 25) <= LINUX_VERSION_CODE) +#include +#endif +#include #include "hw_fence_drv_priv.h" #include "hw_fence_drv_utils.h" @@ -543,6 +547,172 @@ int hw_fence_utils_init_soccp_irq(struct hw_fence_driver_data *drv_data) return ret; } +#if (KERNEL_VERSION(6, 1, 25) <= LINUX_VERSION_CODE) +/* + * This is called to set soccp power vote based off internal counter of soccp power votes. 
+ * This must be called with rproc_lock held + */ +static int _set_intended_soccp_state(struct hw_fence_soccp *soccp_props) +{ + bool intended_state; + int ret; + + intended_state = (refcount_read(&soccp_props->usage_cnt) > 1); + + if (intended_state == soccp_props->is_awake) + return 0; + + /* cannot call soccp power vote because soccp has crashed */ + if (IS_ERR_OR_NULL(soccp_props->rproc)) { + HWFNC_DBG_SSR("Cannot set power vote before after_powerup notification\n"); + return -EINVAL; + } + + ret = rproc_set_state(soccp_props->rproc, intended_state); + if (!ret) + soccp_props->is_awake = intended_state; + + return ret; +} + +int hw_fence_utils_set_power_vote(struct hw_fence_driver_data *drv_data, bool state) +{ + struct hw_fence_soccp *soccp_props; + bool prev_state, cur_state; + int ret; + + if (!drv_data || !drv_data->has_soccp) { + HWFNC_ERR("invalid params: drv_data:0x%pK has_soccp:%d state:%d\n", drv_data, + drv_data ? drv_data->has_soccp : -1, state); + return -EINVAL; + } + + soccp_props = &drv_data->soccp_props; + mutex_lock(&soccp_props->rproc_lock); + if (state) { + refcount_inc(&soccp_props->usage_cnt); + } else { + if (refcount_read(&soccp_props->usage_cnt) == 1) { + mutex_unlock(&soccp_props->rproc_lock); + HWFNC_ERR("removing usage cnt that was never set\n"); + + return -EINVAL; + } + refcount_dec(&soccp_props->usage_cnt); + } + + prev_state = soccp_props->is_awake; + ret = _set_intended_soccp_state(soccp_props); + cur_state = soccp_props->is_awake; + + mutex_unlock(&soccp_props->rproc_lock); + + HWFNC_DBG_L("Set power vote prev:%d curr:%d req_state:%d votes:0x%x ret:%d\n", + prev_state, cur_state, state, refcount_read(&soccp_props->usage_cnt), ret); + + return 0; /* do not expose failures of power vote to client */ +} +#else +static int _set_intended_soccp_state(struct hw_fence_soccp *soccp_props) +{ + HWFNC_ERR("Kernel version does not support SOCCP power votes\n"); + return -EINVAL; +} + +int hw_fence_utils_set_power_vote(struct hw_fence_soccp 
*soccp_props, bool state) +{ + HWFNC_ERR("Kernel version does not support SOCCP power votes\n"); + return -EINVAL; +} +#endif + +static int _set_soccp_rproc(struct hw_fence_soccp *soccp_props, phandle ph) +{ + int ret; + + mutex_lock(&soccp_props->rproc_lock); + if (IS_ERR_OR_NULL(soccp_props->rproc)) + soccp_props->rproc = rproc_get_by_phandle(ph); + if (IS_ERR_OR_NULL(soccp_props->rproc)) { + ret = PTR_ERR(soccp_props->rproc); + if (!ret) + ret = -EINVAL; + soccp_props->rproc = NULL; + } else { + ret = _set_intended_soccp_state(soccp_props); + } + mutex_unlock(&soccp_props->rproc_lock); + + return ret; +} + +static int hw_fence_notify_ssr(struct notifier_block *nb, unsigned long action, void *data) +{ + struct hw_fence_soccp *soccp_props = container_of(nb, struct hw_fence_soccp, ssr_nb); + struct qcom_ssr_notify_data *notify_data = data; + int ret; + + switch (action) { + case QCOM_SSR_BEFORE_POWERUP: + HWFNC_DBG_SSR("received soccp starting event\n"); + break; + case QCOM_SSR_AFTER_POWERUP: + HWFNC_DBG_SSR("received soccp running event\n"); + /* rproc must be available after power up notification */ + ret = _set_soccp_rproc(soccp_props, soccp_props->rproc_ph); + if (ret) + HWFNC_ERR("failed getting soccp_rproc:0x%pK ph:%d usage_cnt:0x%x ret:%d\n", + soccp_props->rproc, soccp_props->rproc_ph, + refcount_read(&soccp_props->usage_cnt), ret); + break; + case QCOM_SSR_BEFORE_SHUTDOWN: + HWFNC_DBG_SSR("received soccp %s event\n", notify_data->crashed ? 
"crashed" : + "stopping"); + break; + case QCOM_SSR_AFTER_SHUTDOWN: + HWFNC_DBG_SSR("received soccp offline event\n"); + mutex_lock(&soccp_props->rproc_lock); + if (!IS_ERR_OR_NULL(soccp_props->rproc)) + rproc_put(soccp_props->rproc); + soccp_props->rproc = NULL; + soccp_props->is_awake = false; + mutex_unlock(&soccp_props->rproc_lock); + break; + default: + HWFNC_ERR("received unrecognized event %lu\n", action); + break; + } + + return NOTIFY_OK; +} + +int hw_fence_utils_register_soccp_ssr_notifier(struct hw_fence_driver_data *drv_data) +{ + void *notifier; + struct hw_fence_soccp *soccp_props; + + if (!drv_data || !drv_data->has_soccp) { + HWFNC_ERR("invalid drv_data:0x%pK has_soccp:%d\n", drv_data, + drv_data ? drv_data->has_soccp : -1); + return -EINVAL; + } + soccp_props = &drv_data->soccp_props; + + mutex_init(&soccp_props->rproc_lock); + refcount_set(&soccp_props->usage_cnt, 1); + soccp_props->ssr_nb.priority = 1; /* higher value indicates higher priority */ + soccp_props->ssr_nb.notifier_call = hw_fence_notify_ssr; + notifier = qcom_register_ssr_notifier("soccp", &soccp_props->ssr_nb); + if (IS_ERR(notifier)) { + HWFNC_ERR("failed to register soccp ssr notifier\n"); + return PTR_ERR(notifier); + } + soccp_props->ssr_notifier = notifier; + HWFNC_DBG_SSR("registered for soccp ssr notification notifier:0x%pK\n", notifier); + + return 0; +} + static int hw_fence_gunyah_share_mem(struct hw_fence_driver_data *drv_data, gh_vmid_t self, gh_vmid_t peer) { @@ -1226,15 +1396,17 @@ int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data) int ret; size_t size; u32 val = 0; - phandle ph; + struct hw_fence_soccp *soccp_props = &drv_data->soccp_props; /* check presence of soccp */ - ret = of_property_read_u32(drv_data->dev->of_node, "soccp_controller", &ph); + ret = of_property_read_u32(drv_data->dev->of_node, "soccp_controller", + &soccp_props->rproc_ph); if (!ret) { drv_data->has_soccp = true; - drv_data->soccp_rproc = rproc_get_by_phandle(ph); - if 
(IS_ERR_OR_NULL(drv_data->soccp_rproc)) { - HWFNC_DBG_INFO("failed to find rproc for phandle:%u\n", ph); + soccp_props->rproc = rproc_get_by_phandle(soccp_props->rproc_ph); + if (IS_ERR_OR_NULL(soccp_props->rproc)) { + HWFNC_DBG_INFO("failed to find rproc for phandle:%u\n", + soccp_props->rproc_ph); return -EPROBE_DEFER; } } diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 0758e08ef6..adb5e134d1 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -31,11 +31,7 @@ static int _set_power_vote_if_needed(struct hw_fence_driver_data *drv_data, #if IS_ENABLED(CONFIG_DEBUG_FS) if (drv_data->has_soccp && client_id >= HW_FENCE_CLIENT_ID_VAL0 && client_id <= HW_FENCE_CLIENT_ID_VAL6) { -#if (KERNEL_VERSION(6, 1, 25) <= LINUX_VERSION_CODE) - ret = rproc_set_state(drv_data->soccp_rproc, state); -#else - ret = -EINVAL; -#endif + ret = hw_fence_utils_set_power_vote(drv_data, state); } #endif /* CONFIG_DEBUG_FS */ @@ -879,6 +875,8 @@ err_exit: static int msm_hw_fence_remove(struct platform_device *pdev) { + struct hw_fence_soccp *soccp_props; + HWFNC_DBG_H("+\n"); if (!pdev) { @@ -891,6 +889,12 @@ static int msm_hw_fence_remove(struct platform_device *pdev) HWFNC_ERR("null driver data\n"); return -EINVAL; } + soccp_props = &hw_fence_drv_data->soccp_props; + if (soccp_props->ssr_notifier) { + if (qcom_unregister_ssr_notifier(soccp_props->ssr_notifier, + &soccp_props->ssr_nb)) + HWFNC_ERR("failed to unregister soccp ssr notifier\n"); + } /* indicate listener thread should stop listening for interrupts from soccp */ hw_fence_drv_data->has_soccp = false; From 480173c246686c9ecd1470a405c4d75f8c089f08 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 11 Jun 2024 12:00:09 -0700 Subject: [PATCH 147/166] mm-drivers: hw_fence: add synx hwfence enable resources stub function Add stub function to enable compilation with new Synx API Change-Id: Ife49fb93d6f32de8cee538a0c05f57a9daffe6dc Signed-off-by: Grace An --- 
hw_fence/src/msm_hw_fence_synx_translation.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c index 28e6a8a823..37cc04c11c 100644 --- a/hw_fence/src/msm_hw_fence_synx_translation.c +++ b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -488,3 +488,10 @@ int synx_hwfence_init_ops(struct synx_ops *hwfence_ops) return SYNX_SUCCESS; } EXPORT_SYMBOL_GPL(synx_hwfence_init_ops); + +int synx_hwfence_enable_resources(enum synx_client_id id, enum synx_resource_type resource, + bool enable) +{ + return -SYNX_INVALID; +} +EXPORT_SYMBOL_GPL(synx_hwfence_enable_resources); From f0176d649798a4f183f89fc856f3b51f221ff06a Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 21 Feb 2024 11:27:06 -0800 Subject: [PATCH 148/166] mm-drivers: hw_fence: add support to set fctl_ready based on soccp ssr Add support to register and deregister a notification for soccp ssr with rproc driver. This change ensures that when the SOCCP is going to stop or crash, the HW Fence Driver will pause fence creation, signaling, etc., failing such external api calls. After the SOCCP powers back up, the HW Fence Driver will resume normal processing. 
Change-Id: I24d3061668ebc90b51dae286cd3a5615e54d2734 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 5 + hw_fence/src/hw_fence_drv_interop.c | 17 ++ hw_fence/src/hw_fence_drv_priv.c | 42 +++++ hw_fence/src/hw_fence_drv_utils.c | 2 + hw_fence/src/msm_hw_fence.c | 177 +++++++++---------- hw_fence/src/msm_hw_fence_synx_translation.c | 4 + 6 files changed, 155 insertions(+), 92 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 36ba5822a0..16d98edd69 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -687,4 +687,9 @@ struct dma_fence *hw_fence_internal_dma_fence_create(struct hw_fence_driver_data struct dma_fence *hw_fence_dma_fence_find(struct hw_fence_driver_data *drv_data, u64 hash, bool incr_refcount); +/* internal checks used by msm_hw_fence and synx_hwfence functions */ +int hw_fence_check_hw_fence_driver(struct hw_fence_driver_data *drv_data); +int hw_fence_check_valid_client(struct hw_fence_driver_data *drv_data, void *client_handle); +int hw_fence_check_valid_fctl(struct hw_fence_driver_data *drv_data, void *client_handle); + #endif /* __HW_FENCE_DRV_INTERNAL_H */ diff --git a/hw_fence/src/hw_fence_drv_interop.c b/hw_fence/src/hw_fence_drv_interop.c index c0a4601d9b..bc6c2f7250 100644 --- a/hw_fence/src/hw_fence_drv_interop.c +++ b/hw_fence/src/hw_fence_drv_interop.c @@ -58,6 +58,9 @@ int hw_fence_interop_to_synx_status(int hw_fence_status_code) case -EBUSY: synx_status_code = -SYNX_BUSY; break; + case -EAGAIN: + synx_status_code = -SYNX_EAGAIN; + break; default: synx_status_code = hw_fence_status_code; break; @@ -240,6 +243,15 @@ int hw_fence_interop_share_handle_status(struct synx_import_indv_params *params, bool is_signaled; u32 error; + ret = hw_fence_check_hw_fence_driver(hw_fence_drv_data); + if (ret) + return hw_fence_interop_to_synx_status(ret); + + if (!hw_fence_drv_data->fctl_ready) { + HWFNC_ERR("fctl in invalid state, cannot 
perform operation\n"); + return -SYNX_EAGAIN; + } + if (IS_ERR_OR_NULL(params) || IS_ERR_OR_NULL(params->new_h_synx) || !(params->flags & SYNX_IMPORT_DMA_FENCE) || (params->flags & SYNX_IMPORT_SYNX_FENCE) || IS_ERR_OR_NULL(params->fence)) { @@ -301,6 +313,11 @@ end: void *hw_fence_interop_get_fence(u32 h_synx) { struct dma_fence *fence; + int ret; + + ret = hw_fence_check_hw_fence_driver(hw_fence_drv_data); + if (ret) + return ERR_PTR(hw_fence_interop_to_synx_status(ret)); if (!(h_synx & SYNX_HW_FENCE_HANDLE_FLAG)) { HWFNC_ERR("invalid h_synx:%u does not have hw-fence handle bit set:%lu\n", diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 6fe013c73f..0728a3b71a 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -2536,3 +2536,45 @@ error: return ret; } + +int hw_fence_check_hw_fence_driver(struct hw_fence_driver_data *drv_data) +{ + if (IS_ERR_OR_NULL(drv_data) || !drv_data->resources_ready) { + HWFNC_ERR("hw fence driver not ready\n"); + return -EINVAL; + } + + return 0; +} + +int hw_fence_check_valid_client(struct hw_fence_driver_data *drv_data, void *client_handle) +{ + int ret; + + ret = hw_fence_check_hw_fence_driver(drv_data); + if (ret) + return ret; + + if (IS_ERR_OR_NULL(client_handle)) { + HWFNC_ERR("Invalid client\n"); + return -EINVAL; + } + + return 0; +} + +int hw_fence_check_valid_fctl(struct hw_fence_driver_data *drv_data, void *client_handle) +{ + int ret; + + ret = hw_fence_check_valid_client(drv_data, client_handle); + if (ret) + return ret; + + if (!drv_data->fctl_ready) { + HWFNC_ERR("fctl in invalid state, cannot perform operation\n"); + return -EAGAIN; + } + + return 0; +} diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index af42817918..b1748ed26c 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -727,6 +727,8 @@ static int hw_fence_notify_ssr(struct notifier_block *nb, unsigned long action, 
case QCOM_SSR_BEFORE_SHUTDOWN: HWFNC_DBG_SSR("received soccp %s event\n", notify_data->crashed ? "crashed" : "stopping"); + /* disallow fence creation, signaling, etc. when soccp is going to stop or crash */ + drv_data->fctl_ready = false; break; case QCOM_SSR_AFTER_SHUTDOWN: HWFNC_DBG_SSR("received soccp offline event\n"); diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index adb5e134d1..511ee3ea4a 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -57,10 +57,9 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, HWFNC_DBG_H("++ client_id_ext:%d\n", client_id_ext); - if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { - HWFNC_ERR("hw fence driver not ready\n"); - return ERR_PTR(-EAGAIN); - } + ret = hw_fence_check_hw_fence_driver(hw_fence_drv_data); + if (ret) + return ERR_PTR(ret); if (client_id_ext >= HW_FENCE_CLIENT_MAX) { HWFNC_ERR("Invalid client_id_ext:%d\n", client_id_ext); @@ -204,10 +203,10 @@ int msm_hw_fence_deregister(void *client_handle) u32 client_id; int ret = 0; - if (IS_ERR_OR_NULL(client_handle)) { - HWFNC_ERR("Invalid client handle\n"); - return -EINVAL; - } + ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle); + if (ret) + return ret; + hw_fence_client = (struct msm_hw_fence_client *)client_handle; client_id = hw_fence_client->client_id_ext; @@ -241,16 +240,15 @@ int msm_hw_fence_create(void *client_handle, struct dma_fence *fence; int ret; - if (IS_ERR_OR_NULL(client_handle) || !params || !params->handle) { + ret = hw_fence_check_valid_fctl(hw_fence_drv_data, client_handle); + if (ret) + return ret; + + if (!params || !params->handle) { HWFNC_ERR("Invalid input\n"); return -EINVAL; } - if (!hw_fence_drv_data->fctl_ready) { - HWFNC_DBG_H("VM not ready, cannot create fence\n"); - return -EAGAIN; - } - HWFNC_DBG_H("+\n"); hw_fence_client = (struct msm_hw_fence_client *)client_handle; @@ -318,7 +316,11 @@ int msm_hw_fence_destroy(void 
*client_handle, struct dma_fence_array *array; int ret; - if (IS_ERR_OR_NULL(client_handle) || !fence) { + ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle); + if (ret) + return ret; + + if (!fence) { HWFNC_ERR("Invalid data\n"); return -EINVAL; } @@ -367,10 +369,10 @@ int msm_hw_fence_destroy_with_handle(void *client_handle, u64 handle) struct msm_hw_fence_client *hw_fence_client; int ret; - if (IS_ERR_OR_NULL(client_handle)) { - HWFNC_ERR("Invalid data\n"); - return -EINVAL; - } + ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle); + if (ret) + return ret; + hw_fence_client = (struct msm_hw_fence_client *)client_handle; if (hw_fence_client->client_id >= hw_fence_drv_data->clients_num) { @@ -403,16 +405,15 @@ int msm_hw_fence_wait_update_v2(void *client_handle, int i, j, destroy_ret, ret = 0; enum hw_fence_client_data_id data_id; - if (IS_ERR_OR_NULL(client_handle) || !fence_list || !*fence_list) { + ret = hw_fence_check_valid_fctl(hw_fence_drv_data, client_handle); + if (ret) + return ret; + + if (!fence_list || !*fence_list) { HWFNC_ERR("Invalid data\n"); return -EINVAL; } - if (!hw_fence_drv_data->fctl_ready) { - HWFNC_DBG_H("VM not ready, cannot destroy fence\n"); - return -EAGAIN; - } - hw_fence_client = (struct msm_hw_fence_client *)client_handle; data_id = hw_fence_get_client_data_id(hw_fence_client->client_id_ext); if (client_data_list && data_id >= HW_FENCE_MAX_CLIENTS_WITH_DATA) { @@ -506,17 +507,11 @@ int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags) { struct msm_hw_fence_client *hw_fence_client; struct msm_hw_fence *hw_fences_tbl; - int i; + int i, ret; - if (IS_ERR_OR_NULL(client_handle)) { - HWFNC_ERR("Invalid client handle!\n"); - return -EINVAL; - } - - if (!hw_fence_drv_data->fctl_ready) { - HWFNC_DBG_H("VM not ready, cannot reset client\n"); - return -EAGAIN; - } + ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle); + if (ret) + return ret; hw_fence_client = (struct 
msm_hw_fence_client *)client_handle; hw_fences_tbl = hw_fence_drv_data->hw_fences_tbl; @@ -535,6 +530,11 @@ EXPORT_SYMBOL_GPL(msm_hw_fence_reset_client); int msm_hw_fence_reset_client_by_id(enum hw_fence_client_id client_id_ext, u32 reset_flags) { enum hw_fence_client_id client_id; + int ret; + + ret = hw_fence_check_hw_fence_driver(hw_fence_drv_data); + if (ret) + return ret; if (client_id_ext >= HW_FENCE_CLIENT_MAX) { HWFNC_ERR("Invalid client_id_ext:%d\n", client_id_ext); @@ -556,15 +556,15 @@ EXPORT_SYMBOL_GPL(msm_hw_fence_reset_client_by_id); int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 error) { struct msm_hw_fence_client *hw_fence_client; + int ret; - if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready || - !hw_fence_drv_data->fctl_ready) { - HWFNC_ERR("hw fence driver or vm not ready\n"); - return -EAGAIN; - } else if (IS_ERR_OR_NULL(client_handle) || - (handle >= hw_fence_drv_data->hw_fences_tbl_cnt)) { - HWFNC_ERR("Invalid handle:%llu or client handle:%d max:%d\n", handle, - IS_ERR_OR_NULL(client_handle), hw_fence_drv_data->hw_fences_tbl_cnt); + ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle); + if (ret) + return ret; + + if (handle >= hw_fence_drv_data->hw_fences_tbl_cnt) { + HWFNC_ERR("Invalid handle:%llu max:%d\n", handle, + hw_fence_drv_data->hw_fences_tbl_cnt); return -EINVAL; } hw_fence_client = (struct msm_hw_fence_client *)client_handle; @@ -583,17 +583,19 @@ EXPORT_SYMBOL_GPL(msm_hw_fence_update_txq); int msm_hw_fence_update_txq_error(void *client_handle, u64 handle, u32 error, u32 update_flags) { struct msm_hw_fence_client *hw_fence_client; + int ret; - if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready || - !hw_fence_drv_data->fctl_ready) { - HWFNC_ERR("hw fence driver or vm not ready\n"); - return -EAGAIN; - } else if (IS_ERR_OR_NULL(client_handle) || - (handle >= hw_fence_drv_data->hw_fences_tbl_cnt) || !error) { - HWFNC_ERR("Invalid 
client_handle:0x%pK or fence handle:%llu max:%d or error:%d\n", - client_handle, handle, hw_fence_drv_data->hw_fences_tbl_cnt, error); + ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle); + if (ret) + return ret; + + if ((handle >= hw_fence_drv_data->hw_fences_tbl_cnt) || !error) { + HWFNC_ERR("Invalid fence handle:%llu max:%d or error:%d\n", + handle, hw_fence_drv_data->hw_fences_tbl_cnt, error); return -EINVAL; - } else if (update_flags != MSM_HW_FENCE_UPDATE_ERROR_WITH_MOVE) { + } + + if (update_flags != MSM_HW_FENCE_UPDATE_ERROR_WITH_MOVE) { HWFNC_ERR("invalid flags:0x%x expected:0x%lx no support of in-place error update\n", update_flags, MSM_HW_FENCE_UPDATE_ERROR_WITH_MOVE); return -EINVAL; @@ -614,15 +616,12 @@ int msm_hw_fence_trigger_signal(void *client_handle, u32 signal_id) { struct msm_hw_fence_client *hw_fence_client; + int ret; + + ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle); + if (ret) + return ret; - if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready - || !hw_fence_drv_data->fctl_ready) { - HWFNC_ERR("hw fence driver or vm not ready\n"); - return -EAGAIN; - } else if (IS_ERR_OR_NULL(client_handle)) { - HWFNC_ERR("Invalid client\n"); - return -EINVAL; - } hw_fence_client = (struct msm_hw_fence_client *)client_handle; HWFNC_DBG_H("sending ipc for client:%d\n", hw_fence_client->client_id); @@ -636,13 +635,14 @@ EXPORT_SYMBOL_GPL(msm_hw_fence_trigger_signal); int msm_hw_fence_register_error_cb(void *client_handle, msm_hw_fence_error_cb_t cb, void *data) { struct msm_hw_fence_client *hw_fence_client; + int ret; - if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { - HWFNC_ERR("hw fence driver not ready\n"); - return -EAGAIN; - } else if (IS_ERR_OR_NULL(client_handle) || IS_ERR_OR_NULL(cb) || IS_ERR_OR_NULL(data)) { - HWFNC_ERR("Invalid params client:0x%pK cb_func:0x%pK data:0x%pK\n", client_handle, - cb, data); + ret = 
hw_fence_check_valid_client(hw_fence_drv_data, client_handle); + if (ret) + return ret; + + if (IS_ERR_OR_NULL(cb) || IS_ERR_OR_NULL(data)) { + HWFNC_ERR("Invalid params cb_func:0x%pK data:0x%pK\n", cb, data); return -EINVAL; } @@ -666,13 +666,9 @@ int msm_hw_fence_deregister_error_cb(void *client_handle) struct msm_hw_fence_client *hw_fence_client; int ret = 0; - if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { - HWFNC_ERR("hw fence driver not ready\n"); - return -EAGAIN; - } else if (IS_ERR_OR_NULL(client_handle)) { - HWFNC_ERR("Invalid client: 0x%pK\n", client_handle); - return -EINVAL; - } + ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle); + if (ret) + return ret; hw_fence_client = (struct msm_hw_fence_client *)client_handle; if (!mutex_trylock(&hw_fence_client->error_cb_lock)) { @@ -704,15 +700,12 @@ EXPORT_SYMBOL_GPL(msm_hw_fence_deregister_error_cb); int msm_hw_fence_dump_debug_data(void *client_handle, u32 dump_flags, u32 dump_clients_mask) { struct msm_hw_fence_client *hw_fence_client; - int client_id; + int client_id, ret; + + ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle); + if (ret) + return ret; - if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { - HWFNC_ERR("hw fence driver not ready\n"); - return -EAGAIN; - } else if (IS_ERR_OR_NULL(client_handle)) { - HWFNC_ERR("Invalid client handle:%d\n", IS_ERR_OR_NULL(client_handle)); - return -EINVAL; - } hw_fence_client = (struct msm_hw_fence_client *)client_handle; if (dump_flags & MSM_HW_FENCE_DBG_DUMP_QUEUES) { @@ -742,14 +735,13 @@ int msm_hw_fence_dump_fence(void *client_handle, struct dma_fence *fence) struct msm_hw_fence_client *hw_fence_client; struct msm_hw_fence *hw_fence; u64 hash; + int ret; - if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { - HWFNC_ERR("hw fence driver not ready\n"); - return -EAGAIN; - } else if (IS_ERR_OR_NULL(client_handle)) { - HWFNC_ERR("Invalid 
client handle:%d\n", IS_ERR_OR_NULL(client_handle)); - return -EINVAL; - } else if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) { + ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle); + if (ret) + return ret; + + if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) { HWFNC_ERR("DMA Fence is not a HW Fence ctx:%llu seqno:%llu flags:0x%lx\n", fence->context, fence->seqno, fence->flags); return -EINVAL; @@ -775,10 +767,11 @@ EXPORT_SYMBOL_GPL(msm_hw_fence_dump_fence); /* Function used for simulation purposes only. */ int msm_hw_fence_driver_doorbell_sim(u64 db_mask) { - if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { - HWFNC_ERR("hw fence driver not ready\n"); - return -EAGAIN; - } + int ret; + + ret = hw_fence_check_hw_fence_driver(hw_fence_drv_data); + if (ret) + return ret; HWFNC_DBG_IRQ("db callback sim-mode flags:0x%llx qtime:%llu\n", db_mask, hw_fence_get_qtime(hw_fence_drv_data)); diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c index 37cc04c11c..c26ff9d60f 100644 --- a/hw_fence/src/msm_hw_fence_synx_translation.c +++ b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -322,6 +322,10 @@ static int synx_hwfence_import_fence(void *client, struct synx_import_indv_param u64 handle; int ret, i; + ret = hw_fence_check_valid_fctl(hw_fence_drv_data, client); + if (ret) + return hw_fence_interop_to_synx_status(ret); + fence = (struct dma_fence *)params->fence; array = to_dma_fence_array(fence); if (array) { From 7c8ee6fbb1a7364a10326226639f1743d8686a15 Mon Sep 17 00:00:00 2001 From: Grace An Date: Mon, 22 Apr 2024 13:21:41 -0700 Subject: [PATCH 149/166] mm-drivers: hw_fence: add support to signal soccp after ssr Add support to signal a ctrl txq payload for soccp to clean up client queues. Wait in ssr notification callback until soccp responds with payload in ctrl rxq. 
Change-Id: Ieb520aac92e0e8e7ba69b19647082e5304b55342 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 8 +++- hw_fence/src/hw_fence_drv_utils.c | 66 ++++++++++++++++++++++++---- 2 files changed, 65 insertions(+), 9 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 16d98edd69..0c1c7e87a1 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -178,11 +178,13 @@ struct msm_hw_fence_queue { * HW_FENCE_PAYLOAD_TYPE_1: client queue payload * HW_FENCE_PAYLOAD_TYPE_2: ctrl queue payload for fence error; client_data stores client_id * HW_FENCE_PAYLOAD_TYPE_3: ctrl queue payload for memory sharing + * HW_FENCE_PAYLOAD_TYPE_4: ctrl queue payload for soccp ssr */ enum payload_type { HW_FENCE_PAYLOAD_TYPE_1 = 1, HW_FENCE_PAYLOAD_TYPE_2, - HW_FENCE_PAYLOAD_TYPE_3 + HW_FENCE_PAYLOAD_TYPE_3, + HW_FENCE_PAYLOAD_TYPE_4 }; /** @@ -353,6 +355,8 @@ struct hw_fence_signal_cb { * @usage_cnt: independent counter of number of users of SOCCP, 1 if no one is using * @ssr_nb: notifier block used for soccp ssr * @ssr_notifier: soccp ssr notifier + * @ssr_wait_queue: wait queue to notify ssr callback that a payload has been received from soccp + * @ssr_cnt: counts number of times soccp has restarted, zero if initial boot-up */ struct hw_fence_soccp { phandle rproc_ph; @@ -362,6 +366,8 @@ struct hw_fence_soccp { refcount_t usage_cnt; struct notifier_block ssr_nb; void *ssr_notifier; + wait_queue_head_t ssr_wait_queue; + u32 ssr_cnt; }; /** diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index b1748ed26c..1cf6e12182 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -95,6 +95,12 @@ */ #define HW_FENCE_MAX_ITER_READ 100 +/** + * HW_FENCE_SOCCP_INIT_TIMEOUT_MS: + * Timeout in ms for hw-fence driver delay of ssr callback while waiting for soccp response message + */ +#define HW_FENCE_SOCCP_INIT_TIMEOUT_MS 50 + 
/** * HW_FENCE_MAX_EVENTS: * Maximum number of HW Fence debug events @@ -177,6 +183,21 @@ struct hw_fence_client_type_desc hw_fence_client_types[HW_FENCE_MAX_CLIENT_TYPE] true, false}, }; +#define hw_fence_wait_event_timeout(waitq, cond, timeout_ms, ret) \ + do { \ + ktime_t cur_ktime; \ + ktime_t exp_ktime; \ + s64 wait_time_jiffies = msecs_to_jiffies(timeout_ms); \ +\ + exp_ktime = ktime_add_ms(ktime_get(), timeout_ms); \ + do { \ + ret = wait_event_timeout(waitq, cond, \ + wait_time_jiffies); \ + cur_ktime = ktime_get(); \ + } while ((!cond) && (ret == 0) && \ + (ktime_compare(ktime_sub(exp_ktime, cur_ktime), ktime_set(0, 0)) > 0));\ + } while (0) + static void _lock(uint64_t *wait) { #if defined(__aarch64__) @@ -348,18 +369,28 @@ static int _process_fence_error_payload(struct hw_fence_driver_data *drv_data, static int _process_init_soccp_payload(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_queue_payload *payload) { + struct hw_fence_soccp *soccp_props; int ret; if (!drv_data || !drv_data->has_soccp || !payload || - payload->type != HW_FENCE_PAYLOAD_TYPE_3) { + !(payload->type == HW_FENCE_PAYLOAD_TYPE_3 || + payload->type == HW_FENCE_PAYLOAD_TYPE_4)) { HWFNC_ERR("invalid drv_data:0x%pK has_soccp:%d payload:0x%pK type:%d expected:%d\n", drv_data, drv_data ? drv_data->has_soccp : -1, payload, payload ? 
payload->type : -1, HW_FENCE_PAYLOAD_TYPE_3); return -EINVAL; } - HWFNC_DBG_INIT("Received ctrlq msg that soccp is initialized\n"); + soccp_props = &drv_data->soccp_props; + if (payload->type == HW_FENCE_PAYLOAD_TYPE_4 && !soccp_props->ssr_cnt) { + HWFNC_ERR("incorrectly received type:%d when ssr error is not happening\n", + payload->type); + return -EINVAL; + } + + HWFNC_DBG_INIT("Received ctrlq msg type:%d that soccp is initialized\n", payload->type); drv_data->fctl_ready = true; + wake_up_all(&soccp_props->ssr_wait_queue); ret = hw_fence_utils_set_power_vote(drv_data, false); if (ret) @@ -385,6 +416,7 @@ static int _process_ctrl_rx_queue(struct hw_fence_driver_data *drv_data) ret = _process_fence_error_payload(drv_data, &payload); break; case HW_FENCE_PAYLOAD_TYPE_3: + case HW_FENCE_PAYLOAD_TYPE_4: ret = _process_init_soccp_payload(drv_data, &payload); break; default: @@ -705,7 +737,8 @@ static int hw_fence_notify_ssr(struct notifier_block *nb, unsigned long action, struct hw_fence_driver_data *drv_data = container_of(soccp_props, struct hw_fence_driver_data, soccp_props); struct qcom_ssr_notify_data *notify_data = data; - int ret; + u32 payload_type; + int ret = 0; switch (action) { case QCOM_SSR_BEFORE_POWERUP: @@ -720,15 +753,30 @@ static int hw_fence_notify_ssr(struct notifier_block *nb, unsigned long action, soccp_props->rproc, soccp_props->rproc_ph, refcount_read(&soccp_props->usage_cnt), ret); /* inform soccp of ctrl queue updates once it is up; this will set a power vote */ - ret = _send_ctrl_txq_msg(drv_data, HW_FENCE_PAYLOAD_TYPE_3); - if (ret) + payload_type = (soccp_props->ssr_cnt) ? 
HW_FENCE_PAYLOAD_TYPE_4 : + HW_FENCE_PAYLOAD_TYPE_3; + ret = _send_ctrl_txq_msg(drv_data, payload_type); + if (ret) { HWFNC_ERR("failed to send ctrlq message for bootup event\n"); + goto end; + } + + hw_fence_wait_event_timeout(soccp_props->ssr_wait_queue, drv_data->fctl_ready, + HW_FENCE_SOCCP_INIT_TIMEOUT_MS, ret); + if (drv_data->fctl_ready) { + HWFNC_DBG_SSR("soccp returned payload in ctrl rxq\n"); + ret = 0; + } else { + HWFNC_ERR("failed to receive ctrlq message for bootup event ret:%d\n", ret); + ret = -EINVAL; + } break; case QCOM_SSR_BEFORE_SHUTDOWN: - HWFNC_DBG_SSR("received soccp %s event\n", notify_data->crashed ? "crashed" : - "stopping"); + HWFNC_DBG_SSR("received soccp %s event ssr_cnt:%d\n", notify_data->crashed ? + "crashed" : "stopping", soccp_props->ssr_cnt); /* disallow fence creation, signaling, etc. when soccp is going to stop or crash */ drv_data->fctl_ready = false; + soccp_props->ssr_cnt++; break; case QCOM_SSR_AFTER_SHUTDOWN: HWFNC_DBG_SSR("received soccp offline event\n"); @@ -744,7 +792,8 @@ static int hw_fence_notify_ssr(struct notifier_block *nb, unsigned long action, break; } - return NOTIFY_OK; +end: + return ret ? NOTIFY_BAD : NOTIFY_OK; } int hw_fence_utils_register_soccp_ssr_notifier(struct hw_fence_driver_data *drv_data) @@ -762,6 +811,7 @@ int hw_fence_utils_register_soccp_ssr_notifier(struct hw_fence_driver_data *drv_ mutex_init(&soccp_props->rproc_lock); refcount_set(&soccp_props->usage_cnt, 1); + init_waitqueue_head(&soccp_props->ssr_wait_queue); soccp_props->ssr_nb.priority = 1; /* higher value indicates higher priority */ soccp_props->ssr_nb.notifier_call = hw_fence_notify_ssr; notifier = qcom_register_ssr_notifier("soccp", &soccp_props->ssr_nb); From e81a989336481a70773935cffd604ed0bab4ad8c Mon Sep 17 00:00:00 2001 From: Grace An Date: Mon, 29 Apr 2024 14:21:05 -0700 Subject: [PATCH 150/166] mm-drivers: hw_fence: add power vote api for soccp Add api to set and remove power votes from HW Fence Driver. 
The HW Fence Driver internally sets/removes power votes from the rproc driver as needed for subsystem restart. Change-Id: I8bac9e427dadef7eec07447b03101115e5d69328 Signed-off-by: Grace An --- hw_fence/src/msm_hw_fence_synx_translation.c | 26 +++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c index 37cc04c11c..b45d6c1320 100644 --- a/hw_fence/src/msm_hw_fence_synx_translation.c +++ b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -10,6 +10,7 @@ #include #include "msm_hw_fence.h" #include "hw_fence_drv_priv.h" +#include "hw_fence_drv_utils.h" #include "hw_fence_drv_debug.h" #include "hw_fence_drv_interop.h" @@ -492,6 +493,29 @@ EXPORT_SYMBOL_GPL(synx_hwfence_init_ops); int synx_hwfence_enable_resources(enum synx_client_id id, enum synx_resource_type resource, bool enable) { - return -SYNX_INVALID; + int ret; + + if (!hw_fence_driver_enable) + return -SYNX_INVALID; + + if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { + HWFNC_ERR("hw fence driver not ready\n"); + return -SYNX_INVALID; + } + + if (!is_hw_fence_client(id) || !(resource == SYNX_RESOURCE_SOCCP)) { + HWFNC_ERR("enabling hw-fence resources for invalid client id:%d res:%d enable:%d\n", + id, resource, enable); + return -SYNX_INVALID; + } + + if (!hw_fence_drv_data->has_soccp) + return SYNX_SUCCESS; + + ret = hw_fence_utils_set_power_vote(hw_fence_drv_data, enable); + if (ret) + HWFNC_ERR("Failed to vote for SOCCP state:%d\n", enable); + + return hw_fence_interop_to_synx_status(ret); } EXPORT_SYMBOL_GPL(synx_hwfence_enable_resources); From eadc309783b9d5b9fb172cae8bb90f1797d20409 Mon Sep 17 00:00:00 2001 From: Grace An Date: Thu, 13 Jun 2024 16:47:26 -0700 Subject: [PATCH 151/166] Revert "mm-drivers: hw_fence: enable hw-fencing by default on sun target" This reverts commit a7df8b84bb86f43857d01d2b970e1d13a1f3d8f4. 
Reason for revert: Disabling hw-fencing temporarily Change-Id: I125d5459db6444a5079e46d69d0458df72c57e31 Signed-off-by: Grace An --- hw_fence/sun_defconfig | 1 - 1 file changed, 1 deletion(-) diff --git a/hw_fence/sun_defconfig b/hw_fence/sun_defconfig index ba00c1a470..b39eb5efbe 100644 --- a/hw_fence/sun_defconfig +++ b/hw_fence/sun_defconfig @@ -1,3 +1,2 @@ CONFIG_QTI_HW_FENCE=y CONFIG_QTI_HW_FENCE_USE_SYNX=y -CONFIG_QTI_ENABLE_HW_FENCE_DEFAULT=y \ No newline at end of file From 5bb27ddd6fea90f3bcbd42066a637f1750b6caf6 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 19 Mar 2024 15:10:31 -0700 Subject: [PATCH 152/166] mm-drivers: hw_fence: enable hw-fencing by default on sun target Add target-based Kconfig to enable hw-fencing by default and enable hw-fencing by default on sun target. When hw-fencing is enabled by default, hw-fencing can be disabled or re-enabled at runtime through: "fastboot oem set-hw-fence-value [0 or 1]". Change-Id: Ic9987ce0a0c006c845895be48f80280666795624 Signed-off-by: Grace An (cherry picked from commit a7df8b84bb86f43857d01d2b970e1d13a1f3d8f4) --- hw_fence/sun_defconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/hw_fence/sun_defconfig b/hw_fence/sun_defconfig index b39eb5efbe..ba00c1a470 100644 --- a/hw_fence/sun_defconfig +++ b/hw_fence/sun_defconfig @@ -1,2 +1,3 @@ CONFIG_QTI_HW_FENCE=y CONFIG_QTI_HW_FENCE_USE_SYNX=y +CONFIG_QTI_ENABLE_HW_FENCE_DEFAULT=y \ No newline at end of file From 350d010afd3ef0e4e81aac120ee9652cbc51a1f2 Mon Sep 17 00:00:00 2001 From: Grace An Date: Thu, 13 Jun 2024 16:47:26 -0700 Subject: [PATCH 153/166] Revert "mm-drivers: hw_fence: enable hw-fencing by default on sun target" This reverts commit a7df8b84bb86f43857d01d2b970e1d13a1f3d8f4. 
Reason for revert: Disabling hw-fencing temporarily Change-Id: I125d5459db6444a5079e46d69d0458df72c57e31 Signed-off-by: Grace An (cherry picked from commit eadc309783b9d5b9fb172cae8bb90f1797d20409) --- hw_fence/sun_defconfig | 1 - 1 file changed, 1 deletion(-) diff --git a/hw_fence/sun_defconfig b/hw_fence/sun_defconfig index ba00c1a470..b39eb5efbe 100644 --- a/hw_fence/sun_defconfig +++ b/hw_fence/sun_defconfig @@ -1,3 +1,2 @@ CONFIG_QTI_HW_FENCE=y CONFIG_QTI_HW_FENCE_USE_SYNX=y -CONFIG_QTI_ENABLE_HW_FENCE_DEFAULT=y \ No newline at end of file From 5660f2ae3a0ee90264e55e9bccd322c373f353e7 Mon Sep 17 00:00:00 2001 From: Aditya Shirsat Date: Wed, 19 Jun 2024 05:59:11 -0700 Subject: [PATCH 154/166] mm-drivers: hw_fence: Fix compilation error Due to kernel upgrade, include missing header file: linux/platform_device.h. Change-Id: I2f086b1364e87461b9d15196536842b0296e9c14 Signed-off-by: Aditya Shirsat --- hw_fence/src/hw_fence_drv_utils.c | 1 + 1 file changed, 1 insertion(+) diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 1cf6e12182..a359f8603a 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -25,6 +25,7 @@ #include #endif #include +#include #include "hw_fence_drv_priv.h" #include "hw_fence_drv_utils.h" From 6e59cf9087a5ea6dc70459362828ceffcafa13eb Mon Sep 17 00:00:00 2001 From: Grace An Date: Thu, 18 Apr 2024 11:51:15 -0700 Subject: [PATCH 155/166] mm-drivers: hw_fence: add support to clean up table for soccp ssr Add support to clean up hw-fence table as part of soccp ssr support. 
Change-Id: Ice342bbd28216726efcc34ef01831173f76d9070 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_debug.h | 4 +- hw_fence/include/hw_fence_drv_priv.h | 2 + hw_fence/src/hw_fence_drv_debug.c | 92 +++++++++++++-------------- hw_fence/src/hw_fence_drv_priv.c | 61 ++++++++++++++++++ hw_fence/src/hw_fence_drv_utils.c | 12 +++- 5 files changed, 122 insertions(+), 49 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_debug.h b/hw_fence/include/hw_fence_drv_debug.h index 3979554840..6f447d1ff0 100644 --- a/hw_fence/include/hw_fence_drv_debug.h +++ b/hw_fence/include/hw_fence_drv_debug.h @@ -74,6 +74,8 @@ extern u32 msm_hw_fence_debug_level; __builtin_return_address(0), ##__VA_ARGS__) int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data); +void hw_fence_debug_dump_fence(enum hw_fence_drv_prio prio, struct msm_hw_fence *hw_fence, u64 hash, + u32 count); #if IS_ENABLED(CONFIG_DEBUG_FS) @@ -84,8 +86,6 @@ int hw_fence_debug_wait_val(struct hw_fence_driver_data *drv_data, void hw_fence_debug_dump_queues(struct hw_fence_driver_data *drv_data, enum hw_fence_drv_prio prio, struct msm_hw_fence_client *hw_fence_client); -void hw_fence_debug_dump_fence(enum hw_fence_drv_prio prio, struct msm_hw_fence *hw_fence, u64 hash, - u32 count); void hw_fence_debug_dump_table(enum hw_fence_drv_prio prio, struct hw_fence_driver_data *drv_data); void hw_fence_debug_dump_events(enum hw_fence_drv_prio prio, struct hw_fence_driver_data *drv_data); diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 0c1c7e87a1..ef88a68889 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -684,6 +684,8 @@ int hw_fence_get_flags_error(struct hw_fence_driver_data *drv_data, u64 hash, u6 u32 *error); int hw_fence_update_hsynx(struct hw_fence_driver_data *drv_data, u64 hash, u32 h_synx, bool wait_for); +int hw_fence_ssr_cleanup_table(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence 
*hw_fences_tbl, u32 table_total_entries, u64 in_flight_lock); /* apis for internally managed dma-fence */ struct dma_fence *hw_dma_fence_init(struct msm_hw_fence_client *hw_fence_client, u64 context, diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index 7ea1ad4d94..d91265157a 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -54,6 +54,52 @@ struct client_data { struct list_head list; }; +static void _dump_fence_helper(enum hw_fence_drv_prio prio, struct msm_hw_fence *hw_fence, + char *parents_dump, u32 index, u32 count) +{ + char sublist[HW_FENCE_MAX_PARENTS_SUBLIST_DUMP]; + u32 parents_cnt; + int i, len = 0; + + if (!hw_fence || !parents_dump) { + HWFNC_ERR("invalid params hw_fence:0x%pK parents_dump:0x%pK\n", hw_fence, + parents_dump); + return; + } + + memset(parents_dump, 0, sizeof(char) * HW_FENCE_MAX_PARENTS_DUMP); + if (hw_fence->parents_cnt) { + if (hw_fence->parents_cnt > MSM_HW_FENCE_MAX_JOIN_PARENTS) { + HWFNC_ERR("hfence[%u] has invalid parents_cnt:%d greater than max:%d\n", + index, hw_fence->parents_cnt, MSM_HW_FENCE_MAX_JOIN_PARENTS); + parents_cnt = MSM_HW_FENCE_MAX_JOIN_PARENTS; + } else { + parents_cnt = hw_fence->parents_cnt; + } + + memset(sublist, 0, sizeof(sublist)); + for (i = 0; i < parents_cnt; i++) + len += scnprintf(sublist + len, HW_FENCE_MAX_PARENTS_SUBLIST_DUMP - len, + "%llu,", hw_fence->parent_list[i]); + scnprintf(parents_dump, HW_FENCE_MAX_PARENTS_DUMP, " p:[%s]", sublist); + } + + HWFNC_DBG_DUMP(prio, HFENCE_TBL_MSG, + count, index, hw_fence->valid, hw_fence->error, hw_fence->ctx_id, hw_fence->seq_id, + hw_fence->wait_client_mask, hw_fence->fence_allocator, hw_fence->flags, + hw_fence->pending_child_cnt, parents_dump, hw_fence->fence_create_time, + hw_fence->fence_trigger_time, hw_fence->fence_wait_time, hw_fence->refcount, + hw_fence->h_synx); +} + +void hw_fence_debug_dump_fence(enum hw_fence_drv_prio prio, struct msm_hw_fence *hw_fence, u64 hash, + u32 
count) +{ + char parents_dump[HW_FENCE_MAX_PARENTS_DUMP]; + + return _dump_fence_helper(prio, hw_fence, parents_dump, hash, count); +} + #if IS_ENABLED(CONFIG_DEBUG_FS) static int _get_debugfs_input_client(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos, @@ -473,52 +519,6 @@ static ssize_t hw_fence_dbg_create_wr(struct file *file, return count; } -static void _dump_fence_helper(enum hw_fence_drv_prio prio, struct msm_hw_fence *hw_fence, - char *parents_dump, u32 index, u32 count) -{ - char sublist[HW_FENCE_MAX_PARENTS_SUBLIST_DUMP]; - u32 parents_cnt; - int i, len = 0; - - if (!hw_fence || !parents_dump) { - HWFNC_ERR("invalid params hw_fence:0x%pK parents_dump:0x%pK\n", hw_fence, - parents_dump); - return; - } - - memset(parents_dump, 0, sizeof(char) * HW_FENCE_MAX_PARENTS_DUMP); - if (hw_fence->parents_cnt) { - if (hw_fence->parents_cnt > MSM_HW_FENCE_MAX_JOIN_PARENTS) { - HWFNC_ERR("hfence[%u] has invalid parents_cnt:%d greater than max:%d\n", - index, hw_fence->parents_cnt, MSM_HW_FENCE_MAX_JOIN_PARENTS); - parents_cnt = MSM_HW_FENCE_MAX_JOIN_PARENTS; - } else { - parents_cnt = hw_fence->parents_cnt; - } - - memset(sublist, 0, sizeof(sublist)); - for (i = 0; i < parents_cnt; i++) - len += scnprintf(sublist + len, HW_FENCE_MAX_PARENTS_SUBLIST_DUMP - len, - "%llu,", hw_fence->parent_list[i]); - scnprintf(parents_dump, HW_FENCE_MAX_PARENTS_DUMP, " p:[%s]", sublist); - } - - HWFNC_DBG_DUMP(prio, HFENCE_TBL_MSG, - count, index, hw_fence->valid, hw_fence->error, hw_fence->ctx_id, hw_fence->seq_id, - hw_fence->wait_client_mask, hw_fence->fence_allocator, hw_fence->flags, - hw_fence->pending_child_cnt, parents_dump, hw_fence->fence_create_time, - hw_fence->fence_trigger_time, hw_fence->fence_wait_time, hw_fence->refcount, - hw_fence->h_synx); -} - -void hw_fence_debug_dump_fence(enum hw_fence_drv_prio prio, struct msm_hw_fence *hw_fence, u64 hash, - u32 count) -{ - char parents_dump[HW_FENCE_MAX_PARENTS_DUMP]; - - return 
_dump_fence_helper(prio, hw_fence, parents_dump, hash, count); -} - static inline int _dump_fence(struct msm_hw_fence *hw_fence, char *buf, int len, int max_size, u32 index, u32 cnt) { diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 0728a3b71a..a0dd654893 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1580,6 +1580,17 @@ int hw_fence_create(struct hw_fence_driver_data *drv_data, ret = -EINVAL; } + /** + * Note: This addresses any race conditions where clients may have been in progress + * creating hw-fences when soccp crashes + */ + if (!drv_data->fctl_ready) { + HWFNC_ERR("unable to create hw-fence while fctl is not in valid state\n"); + hw_fence_destroy_refcount(drv_data, *hash, HW_FENCE_FCTL_REFCOUNT); + hw_fence_destroy_with_hash(drv_data, hw_fence_client, *hash); + return -EAGAIN; + } + if (hw_fence_client->skip_fctl_ref) { ret = hw_fence_destroy_refcount(drv_data, *hash, HW_FENCE_FCTL_REFCOUNT); if (ret) @@ -2578,3 +2589,53 @@ int hw_fence_check_valid_fctl(struct hw_fence_driver_data *drv_data, void *clien return 0; } + +/* unlock the in-flight hw-fence and any locks taken on client rx queue for handling */ +static void unlock_in_flight_fence(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence *hw_fence, u64 hash, u64 in_flight_lock) +{ + u64 wait_client_mask; + u32 wait_client_id, lock_idx; + + HWFNC_DBG_SSR("unlock in-flight fence locked as 0x%llx\n", hw_fence->lock); + hw_fence_debug_dump_fence(HW_FENCE_SSR, hw_fence, hash, 0); + wait_client_mask = hw_fence->wait_client_mask; + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); + + for (wait_client_id = 0; wait_client_id <= drv_data->rxq_clients_num; wait_client_id++) { + if (wait_client_mask & BIT(wait_client_id)) { + lock_idx = (wait_client_id - 1) * HW_FENCE_LOCK_IDX_OFFSET; + if (drv_data->client_lock_tbl[lock_idx] == in_flight_lock) { + GLOBAL_ATOMIC_STORE(drv_data, + &drv_data->client_lock_tbl[lock_idx], 0); + 
HWFNC_DBG_SSR("unlock client rxq id:%d locked as 0x%llx\n", + wait_client_id, in_flight_lock); + } + } + } +} + +int hw_fence_ssr_cleanup_table(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence *hw_fences_tbl, u32 table_total_entries, u64 in_flight_lock) +{ + struct msm_hw_fence *hw_fence; + int i; + + if (!drv_data || !hw_fences_tbl || !in_flight_lock || in_flight_lock == BIT(0)) { + HWFNC_ERR("invalid params drv_data:0x%pK table:0x%pK in_flight_lock:0x%llx", + drv_data, hw_fences_tbl, in_flight_lock); + return -EINVAL; + } + + for (i = 0; i < table_total_entries; i++) { + hw_fence = _get_hw_fence(table_total_entries, hw_fences_tbl, i); + + if (hw_fence->lock == in_flight_lock) { + /* only one fence should be affected by this */ + unlock_in_flight_fence(drv_data, hw_fence, i, in_flight_lock); + } + _signal_fence_if_unsignaled(drv_data, hw_fence, i, MSM_HW_FENCE_ERROR_RESET, false); + } + + return 0; +} diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index a359f8603a..35db576485 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -102,6 +102,12 @@ */ #define HW_FENCE_SOCCP_INIT_TIMEOUT_MS 50 +/** + * HW_FENCE_FCTL_LOCK_VALUE: + * Fence controller sets the hw-fence lock value to this when locking a given fence. 
+ */ +#define HW_FENCE_FCTL_LOCK_VALUE BIT(1) + /** * HW_FENCE_MAX_EVENTS: * Maximum number of HW Fence debug events @@ -242,7 +248,7 @@ static void _unlock_vm(struct hw_fence_driver_data *drv_data, uint64_t *lock) lock_val = *lock; /* Read the lock value */ HWFNC_DBG_LOCK("unlock: lock_val after:0x%llx\n", lock_val); - if (lock_val & 0x2) { /* check if SVM BIT1 is set*/ + if (lock_val & HW_FENCE_FCTL_LOCK_VALUE) { /* check if SVM BIT1 is set*/ /* * SVM is in WFI state, since SVM acquire bit is set * Trigger IRQ to Wake-Up SVM Client @@ -787,6 +793,10 @@ static int hw_fence_notify_ssr(struct notifier_block *nb, unsigned long action, soccp_props->rproc = NULL; soccp_props->is_awake = false; mutex_unlock(&soccp_props->rproc_lock); + ret = hw_fence_ssr_cleanup_table(drv_data, drv_data->hw_fences_tbl, + drv_data->hw_fence_table_entries, HW_FENCE_FCTL_LOCK_VALUE); + if (ret) + HWFNC_ERR("failed to cleanup hw-fence table for soccp ssr\n"); break; default: HWFNC_ERR("received unrecognized event %lu\n", action); From b42a0a8fd93bd09199f20149de9bbf5c50cf25e5 Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 12 Jun 2024 10:58:33 -0700 Subject: [PATCH 156/166] mm-drivers: hw_fence: always remove power votes on soccp for ctrlq msg SoCCP sends ctrl rxq messages asynchronously back to HLOS, so HW Fence Driver should not rely on SoCCP to send this message back before removing power vote. Instead, wait for this message with timeout and remove the power vote regardless if message is received or not received by this timeout. 
Change-Id: I37cfeefba0f799949609fc30af0d0bff9c30c3d0 Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_utils.c | 39 ++++++++++++++++--------------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 35db576485..3d208ce9b4 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -377,7 +377,6 @@ static int _process_init_soccp_payload(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_queue_payload *payload) { struct hw_fence_soccp *soccp_props; - int ret; if (!drv_data || !drv_data->has_soccp || !payload || !(payload->type == HW_FENCE_PAYLOAD_TYPE_3 || @@ -399,11 +398,7 @@ static int _process_init_soccp_payload(struct hw_fence_driver_data *drv_data, drv_data->fctl_ready = true; wake_up_all(&soccp_props->ssr_wait_queue); - ret = hw_fence_utils_set_power_vote(drv_data, false); - if (ret) - HWFNC_ERR("failed to remove power vote used to send ctrl queue message\n"); - - return ret; + return 0; } static int _process_ctrl_rx_queue(struct hw_fence_driver_data *drv_data) @@ -572,11 +567,14 @@ static int hw_fence_soccp_listener(void *data) return 0; } -static int _send_ctrl_txq_msg(struct hw_fence_driver_data *drv_data, u32 payload_type) +static int _send_bootup_ctrl_txq_msg(struct hw_fence_driver_data *drv_data, u32 payload_type) { struct msm_hw_fence_queue *queue; int ret; + if (drv_data->fctl_ready) + return 0; + ret = hw_fence_utils_set_power_vote(drv_data, true); if (ret) { HWFNC_ERR("failed to set power vote to send ctrlq message ret:%d\n", ret); @@ -604,6 +602,19 @@ static int _send_ctrl_txq_msg(struct hw_fence_driver_data *drv_data, u32 payload hw_fence_ipcc_trigger_signal(drv_data, drv_data->ipcc_client_pid, drv_data->ipcc_fctl_vid, hw_fence_ipcc_get_signal_id(drv_data, 0)); + /* wait for communication back from soccp with timeout */ + hw_fence_wait_event_timeout(drv_data->soccp_props.ssr_wait_queue, drv_data->fctl_ready, + 
HW_FENCE_SOCCP_INIT_TIMEOUT_MS, ret); + + ret = hw_fence_utils_set_power_vote(drv_data, false); + if (ret) + HWFNC_ERR("failed to remove power vote for ctrlq msg ret:%d\n", ret); + + if (!drv_data->fctl_ready) { + HWFNC_ERR("failed to receive ctrlq message for bootup event ret:%d\n", ret); + ret = -EINVAL; + } + return ret; } @@ -762,21 +773,11 @@ static int hw_fence_notify_ssr(struct notifier_block *nb, unsigned long action, /* inform soccp of ctrl queue updates once it is up; this will set a power vote */ payload_type = (soccp_props->ssr_cnt) ? HW_FENCE_PAYLOAD_TYPE_4 : HW_FENCE_PAYLOAD_TYPE_3; - ret = _send_ctrl_txq_msg(drv_data, payload_type); + ret = _send_bootup_ctrl_txq_msg(drv_data, payload_type); if (ret) { HWFNC_ERR("failed to send ctrlq message for bootup event\n"); goto end; } - - hw_fence_wait_event_timeout(soccp_props->ssr_wait_queue, drv_data->fctl_ready, - HW_FENCE_SOCCP_INIT_TIMEOUT_MS, ret); - if (drv_data->fctl_ready) { - HWFNC_DBG_SSR("soccp returned payload in ctrl rxq\n"); - ret = 0; - } else { - HWFNC_ERR("failed to receive ctrlq message for bootup event ret:%d\n", ret); - ret = -EINVAL; - } break; case QCOM_SSR_BEFORE_SHUTDOWN: HWFNC_DBG_SSR("received soccp %s event ssr_cnt:%d\n", notify_data->crashed ? 
@@ -834,7 +835,7 @@ int hw_fence_utils_register_soccp_ssr_notifier(struct hw_fence_driver_data *drv_ HWFNC_DBG_SSR("registered for soccp ssr notification notifier:0x%pK\n", notifier); /* if soccp is already up, do initial bootup here; this first attempt may fail */ - ret = _send_ctrl_txq_msg(drv_data, HW_FENCE_PAYLOAD_TYPE_3); + ret = _send_bootup_ctrl_txq_msg(drv_data, HW_FENCE_PAYLOAD_TYPE_3); if (ret) HWFNC_DBG_INFO("can't send ctrl tx queue msg to inform soccp of mem map\n"); From 1abe4af6f44a1ec078d33c025660fad0029761cc Mon Sep 17 00:00:00 2001 From: Abhijith Desai Date: Fri, 9 Aug 2024 16:55:42 +0530 Subject: [PATCH 157/166] mm-drivers: hw_fence: avoid returning -EPROBE_DEFER By the time that HW Fence Driver probes, the rproc data structure may not be ready; instead, this data structure can be successfully acquired from HW Fence Driver after receiving AFTER_POWERUP notification. To avoid boot-up latency for HW Fence Driver or other client drivers, do not return -EPROBE_DEFER when rproc_get_by_phandle fails during HW Fence Driver probe. 
Change-Id: Ic3caab1402ffa9b8e2d9621437e02586cf99d60a Signed-off-by: Grace An Signed-off-by: Abhijith Desai --- hw_fence/src/hw_fence_drv_priv.c | 2 +- hw_fence/src/hw_fence_drv_utils.c | 16 ++++++++-------- hw_fence/src/msm_hw_fence.c | 5 ++--- 3 files changed, 11 insertions(+), 12 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index a0dd654893..011a83f847 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -707,7 +707,7 @@ int hw_fence_init(struct hw_fence_driver_data *drv_data) ret = hw_fence_utils_parse_dt_props(drv_data); if (ret) { - HWFNC_DBG_INFO("failed to set dt properties\n"); + HWFNC_ERR("failed to set dt properties\n"); goto exit; } diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 3d208ce9b4..7ae6a489b2 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -835,6 +835,13 @@ int hw_fence_utils_register_soccp_ssr_notifier(struct hw_fence_driver_data *drv_ HWFNC_DBG_SSR("registered for soccp ssr notification notifier:0x%pK\n", notifier); /* if soccp is already up, do initial bootup here; this first attempt may fail */ + ret = _set_soccp_rproc(soccp_props, soccp_props->rproc_ph); + if (ret) { + HWFNC_DBG_INFO("failed getting soccp_rproc:0x%pK ph:%d at probe time ret:%d\n", + soccp_props->rproc, soccp_props->rproc_ph, ret); + return 0; + } + ret = _send_bootup_ctrl_txq_msg(drv_data, HW_FENCE_PAYLOAD_TYPE_3); if (ret) HWFNC_DBG_INFO("can't send ctrl tx queue msg to inform soccp of mem map\n"); @@ -1523,15 +1530,8 @@ int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data) /* check presence of soccp */ ret = of_property_read_u32(drv_data->dev->of_node, "soccp_controller", &soccp_props->rproc_ph); - if (!ret) { + if (!ret && soccp_props->rproc_ph) drv_data->has_soccp = true; - soccp_props->rproc = rproc_get_by_phandle(soccp_props->rproc_ph); - if (IS_ERR_OR_NULL(soccp_props->rproc)) { - 
HWFNC_DBG_INFO("failed to find rproc for phandle:%u\n", - soccp_props->rproc_ph); - return -EPROBE_DEFER; - } - } ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-table-entries", &val); if (ret || !val) { diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index e8fcff2f96..85d70561e3 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -845,7 +845,7 @@ error: kfree(hw_fence_drv_data); hw_fence_drv_data = (void *) -EPROBE_DEFER; - HWFNC_DBG_INFO("error %d\n", rc); + HWFNC_ERR_ONCE("error %d\n", rc); return rc; } @@ -871,8 +871,7 @@ static int msm_hw_fence_probe(struct platform_device *pdev) return 0; err_exit: - if (rc != -EPROBE_DEFER) - HWFNC_ERR_ONCE("error %d\n", rc); + HWFNC_ERR_ONCE("error %d\n", rc); return rc; } From 09ca6e48d9f335accdf7556808b841f38817eabc Mon Sep 17 00:00:00 2001 From: Grace An Date: Thu, 1 Aug 2024 12:02:11 -0700 Subject: [PATCH 158/166] mm-drivers: hw_fence: enable waiting for multiple fences Add support for test cases in which validation clients have multiple fences pending to be processed by fence wait. Val client must keep internal signal set if there are still fences pending to be read from the queue. 
Change-Id: I7125d3369a0bdecb72a42f080c7dde1aa8e7afd6 Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_debug.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index d91265157a..bc8657d5f7 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -1200,6 +1200,13 @@ static long _process_val_signal(struct hw_fence_driver_data *drv_data, if ((fence && payload.ctxt_id == context && payload.seqno == seqno) || (mask && ((mask & hash) == (mask & payload.hash)))) { *error = payload.error; + + if (read > 0) { + HWFNC_DBG_L("Client:%d has non-empty rxq, set val_signal flag\n", + hw_fence_client->client_id); + atomic_set(&hw_fence_client->val_signal, 1); + } + return 0; } } From 9156dc3fc8f94468f6457471c78f6ed9174a91f4 Mon Sep 17 00:00:00 2001 From: Grace An Date: Thu, 22 Aug 2024 10:27:57 -0700 Subject: [PATCH 159/166] mm-drivers: hw_fence: propagate error from failed rx queue update If Rx queue is full at time that client registers for wait on an already signaled fence, the rx queue will fail to be updated. Currently, this error is ignored. Instead, propagate this error to the client and also remove the client's refcount in this scenario so that the client consistently does not need to invoke synx_release after synx_import call fails. 
Change-Id: Ic723d20ac5d1a769a5afe3b6ad7efab5191cbc5e Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_priv.c | 38 ++++++++++++++++++++++++-------- 1 file changed, 29 insertions(+), 9 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 011a83f847..8d8c6e82d9 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1751,10 +1751,11 @@ struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data, return hw_fence; } -static void _fence_ctl_signal(struct hw_fence_driver_data *drv_data, +static int _fence_ctl_signal(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash, u64 flags, u64 client_data, u32 error, bool signal_from_import) { + int ret = 0; u32 tx_client_id = drv_data->ipcc_client_pid; /* phys id for tx client */ u32 rx_client_id = hw_fence_client->ipc_client_vid; /* virt id for rx client */ @@ -1762,22 +1763,30 @@ static void _fence_ctl_signal(struct hw_fence_driver_data *drv_data, /* Call fence error callback */ if (error && hw_fence_client->fence_error_cb) { - hw_fence_utils_fence_error_cb(hw_fence_client, hw_fence->ctx_id, hw_fence->seq_id, - hash, flags, error); + ret = hw_fence_utils_fence_error_cb(hw_fence_client, hw_fence->ctx_id, + hw_fence->seq_id, hash, flags, error); } else { /* Write to Rx queue */ if (hw_fence_client->signaled_update_rxq || - (hw_fence_client->update_rxq && !signal_from_import)) - hw_fence_update_queue(drv_data, hw_fence_client, hw_fence->ctx_id, + (hw_fence_client->update_rxq && !signal_from_import)) { + ret = hw_fence_update_queue(drv_data, hw_fence_client, hw_fence->ctx_id, hw_fence->seq_id, hash, flags, client_data, error, HW_FENCE_RX_QUEUE - 1); + if (ret) { + HWFNC_ERR("Can't update rxq clt:%d h:%llu ctx:%llu sq:%llu e:%d\n", + hw_fence_client ? 
hw_fence_client->client_id : -1, hash, + hw_fence->ctx_id, hw_fence->seq_id, error); + return ret; + } + } #if IS_ENABLED(CONFIG_DEBUG_FS) /* signal validation clients on targets with vm through custom mechanism */ if (!drv_data->has_soccp && hw_fence_client->client_id >= HW_FENCE_CLIENT_ID_VAL0 && hw_fence_client->client_id <= HW_FENCE_CLIENT_ID_VAL6) { - process_validation_client_loopback(drv_data, hw_fence_client->client_id); - return; + ret = process_validation_client_loopback(drv_data, + hw_fence_client->client_id); + return ret; } #endif /* CONFIG_DEBUG_FS */ @@ -1786,6 +1795,8 @@ static void _fence_ctl_signal(struct hw_fence_driver_data *drv_data, hw_fence_ipcc_trigger_signal(drv_data, tx_client_id, rx_client_id, hw_fence_client->ipc_signal_id); } + + return ret; } static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data, @@ -2045,6 +2056,7 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hw_fence; enum hw_fence_client_data_id data_id; bool is_signaled = false; + int destroy_ret, ret = 0; if (client_data) { data_id = hw_fence_get_client_data_id(hw_fence_client->client_id_ext); @@ -2092,11 +2104,19 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, if (is_signaled) { if (fence != NULL) set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &fence->flags); - _fence_ctl_signal(drv_data, hw_fence_client, hw_fence, *hash, 0, client_data, + ret = _fence_ctl_signal(drv_data, hw_fence_client, hw_fence, *hash, 0, client_data, hw_fence->error, true); + if (ret) { + HWFNC_ERR("failed to signal client:%d for import signaled fence h:%llu\n", + hw_fence_client ? hw_fence_client->client_id : 0xff, *hash); + destroy_ret = hw_fence_destroy_with_hash(drv_data, hw_fence_client, *hash); + if (destroy_ret) + HWFNC_ERR("failed destroy ref for failed import client:%d h:%llu\n", + hw_fence_client ? 
hw_fence_client->client_id : 0xff, *hash); + } } - return 0; + return ret; } int hw_fence_process_fence(struct hw_fence_driver_data *drv_data, From 64bf8b3fd831adc80504bab626a6911c7831419f Mon Sep 17 00:00:00 2001 From: Grace An Date: Mon, 19 Aug 2024 13:01:57 -0700 Subject: [PATCH 160/166] mm-drivers: hw_fence: avoid error message when debugfs node is empty Fence controller events and hw-fence table may be empty when attempting to dump these through debugfs node. Downgrade the current error message to debug level. Change-Id: I15925102e70334724a58e2d1cdc7d77ca5f84b2e Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_debug.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index bc8657d5f7..f4e95c1fd4 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -745,9 +745,14 @@ static ssize_t hw_fence_dbg_dump_events_rd(struct file *file, char __user *user_ } HWFNC_DBG_H("-- dump_events: index:%d qtime:%llu\n", index, hw_fence_get_qtime(drv_data)); - if (len <= 0 || len > user_buf_size) { + if (len < 0 || len > user_buf_size) { HWFNC_ERR("len:%d invalid buff size:%zu\n", len, user_buf_size); len = 0; + } + + if (len == 0) { + HWFNC_DBG_H("not printing anything to output because len:0 buf_size:%zu\n", + user_buf_size); goto exit; } @@ -907,9 +912,14 @@ static ssize_t hw_fence_dbg_dump_table_rd(struct file *file, char __user *user_b dump_single_entry(drv_data, buf, &index, max_size) : dump_full_table(drv_data, buf, &index, &cnt, max_size, entry_size); - if (len <= 0 || len > user_buf_size) { - HWFNC_ERR("len:%d invalid buff size:%lu\n", len, user_buf_size); + if (len < 0 || len > user_buf_size) { + HWFNC_ERR("len:%d invalid buff size:%zu\n", len, user_buf_size); len = 0; + } + + if (len == 0) { + HWFNC_DBG_H("not printing anything to output because len:0 buf_size:%zu\n", + user_buf_size); goto exit; } From 
fe99decada4902a75a3408393f660297525073f8 Mon Sep 17 00:00:00 2001 From: Grace An Date: Fri, 24 Mar 2023 16:02:17 -0700 Subject: [PATCH 161/166] mm-drivers: hw_fence: extend debugfs support for dumping queues info Extend debugfs support to dump client queues debug info by dumping info in debugfs buf instead of only in serial console. The user must first write client_id of interest to debugfs node and then cat the node to dump client queues info for that client. Change-Id: I31307a40ecb66852d30799d4890b0efde82785fc Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 3 + hw_fence/src/hw_fence_drv_debug.c | 235 ++++++++++++++++++++++----- 2 files changed, 195 insertions(+), 43 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index ef88a68889..184704c1d2 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -258,6 +258,7 @@ struct msm_hw_fence_mem_data { * @entry_rd: flag to indicate if debugfs dumps a single line or table * @context_rd: debugfs setting to indicate which context id to dump * @seqno_rd: debugfs setting to indicate which seqno to dump + * @client_id_rd: debugfs setting to indicate which client queue(s) to dump * @hw_fence_sim_release_delay: delay in micro seconds for the debugfs node that simulates the * hw-fences behavior, to release the hw-fences * @create_hw_fences: boolean to continuosly create hw-fences within debugfs @@ -271,6 +272,7 @@ struct msm_hw_fence_dbg_data { bool entry_rd; u64 context_rd; u64 seqno_rd; + u32 client_id_rd; u32 hw_fence_sim_release_delay; bool create_hw_fences; @@ -661,6 +663,7 @@ int hw_fence_update_queue_helper(struct hw_fence_driver_data *drv_data, u32 clie int hw_fence_update_existing_txq_payload(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 hash, u32 error); inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data); +char *_get_queue_type(int queue_type); int 
hw_fence_read_queue(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence_queue_payload *payload, int queue_type); diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index f4e95c1fd4..3cb12fe6d7 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -31,6 +31,11 @@ #define ktime_compare_safe(A, B) ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0)) +#define HFENCE_QHDR_MSG \ + "Client:%d %s q_sz_bytes:%u rd_idx:%u wr_idx:%u tx_wm:%u skips:%s start:%u factor:%u\n" +#define HFENCE_QPAYLOAD_MSG \ + "%s[%d]: hash:%llu ctx:%llu seqno:%llu f:%llu d:%llu err:%u time:%llu type:%u\n" + u32 msm_hw_fence_debug_level = HW_FENCE_PRINTK; /** @@ -101,9 +106,9 @@ void hw_fence_debug_dump_fence(enum hw_fence_drv_prio prio, struct msm_hw_fence } #if IS_ENABLED(CONFIG_DEBUG_FS) -static int _get_debugfs_input_client(struct file *file, +static int _get_debugfs_input_client_with_min(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos, - struct hw_fence_driver_data **drv_data) + struct hw_fence_driver_data **drv_data, int client_id_min) { char buf[10]; int client_id; @@ -126,15 +131,23 @@ static int _get_debugfs_input_client(struct file *file, if (kstrtouint(buf, 0, &client_id)) return -EFAULT; - if (client_id < HW_FENCE_CLIENT_ID_CTX0 || client_id >= HW_FENCE_CLIENT_MAX) { + if (client_id < client_id_min || client_id >= (*drv_data)->clients_num) { HWFNC_ERR("invalid client_id:%d min:%d max:%d\n", client_id, - HW_FENCE_CLIENT_ID_CTX0, HW_FENCE_CLIENT_MAX); + client_id_min, (*drv_data)->clients_num); return -EINVAL; } return client_id; } +static int _get_debugfs_input_client(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos, + struct hw_fence_driver_data **drv_data) +{ + return _get_debugfs_input_client_with_min(file, user_buf, count, ppos, drv_data, + HW_FENCE_CLIENT_ID_CTX0); +} + static int _debugfs_ipcc_trigger(struct 
file *file, const char __user *user_buf, size_t count, loff_t *ppos, u32 tx_client, u32 rx_client) { @@ -680,7 +693,7 @@ static ssize_t hw_fence_dbg_dump_events_rd(struct file *file, char __user *user_ size_t user_buf_size, loff_t *ppos) { struct hw_fence_driver_data *drv_data; - u32 entry_size = sizeof(struct msm_hw_fence_event), max_size = SZ_4K; + u32 entry_size = sizeof(HFENCE_EVT_MSG), max_size = SZ_4K; char *buf = NULL; int len = 0; static u64 start_time; @@ -767,45 +780,69 @@ exit: return len; } +static int _dump_queue_header(struct hw_fence_driver_data *drv_data, enum hw_fence_drv_prio prio, + struct msm_hw_fence_queue *queue, int client_id, int queue_type, u32 **rd_idx_ptr, + u32 **wr_idx_ptr, u32 **tx_wm_ptr) +{ + if (!drv_data || !queue || !rd_idx_ptr || !wr_idx_ptr || !tx_wm_ptr) { + HWFNC_ERR("invalid drv_data:0x%pK q:0x%pK rd_idx:0x%pK wr_idx:0x%pK tx_wm:0x%pK\n", + drv_data, queue, rd_idx_ptr, wr_idx_ptr, tx_wm_ptr); + return -EINVAL; + } + + hw_fence_get_queue_idx_ptrs(drv_data, queue->va_header, rd_idx_ptr, wr_idx_ptr, + tx_wm_ptr); + + HWFNC_DBG_DUMP(prio, HFENCE_QHDR_MSG, client_id, _get_queue_type(queue_type), + queue->q_size_bytes, **rd_idx_ptr, **wr_idx_ptr, **tx_wm_ptr, + queue->skip_wr_idx ? 
"true" : "false", queue->rd_wr_idx_start, + queue->rd_wr_idx_factor); + + return 0; +} + +static struct msm_hw_fence_queue_payload *_dump_queue_payload(enum hw_fence_drv_prio prio, + struct msm_hw_fence_queue *queue, int index, int queue_type) +{ + struct msm_hw_fence_queue_payload *payload; + u32 *read_ptr; + u64 timestamp; + + read_ptr = ((u32 *)queue->va_queue + + (index * (sizeof(struct msm_hw_fence_queue_payload) / sizeof(u32)))); + payload = (struct msm_hw_fence_queue_payload *)read_ptr; + timestamp = (u64)payload->timestamp_lo | ((u64)payload->timestamp_hi << 32); + HWFNC_DBG_DUMP(prio, HFENCE_QPAYLOAD_MSG, _get_queue_type(queue_type), + index, payload->hash, payload->ctxt_id, payload->seqno, payload->flags, + payload->client_data, payload->error, timestamp, payload->type); + + return payload; +} + static void _dump_queue(struct hw_fence_driver_data *drv_data, enum hw_fence_drv_prio prio, struct msm_hw_fence_client *hw_fence_client, int queue_type) { struct msm_hw_fence_queue *queue; - struct msm_hw_fence_queue_payload *payload; - u64 timestamp; - u32 *read_ptr, *rd_idx_ptr, *wr_idx_ptr, *tx_wm_ptr, queue_entries; + u32 queue_entries, *rd_idx_ptr, *wr_idx_ptr, *tx_wm_ptr; int i; - queue = &hw_fence_client->queues[queue_type - 1]; + queue = &hw_fence_client->queues[queue_type]; if ((queue_type > hw_fence_client->queues_num) || !queue || !queue->va_header || !queue->va_queue) { HWFNC_ERR("Cannot dump client:%d q_type:%s q_ptr:0x%pK q_header:0x%pK q_va:0x%pK\n", - hw_fence_client->client_id, - (queue_type == HW_FENCE_TX_QUEUE) ? "TX QUEUE" : "RX QUEUE", - queue, queue ? queue->va_header : NULL, queue ? queue->va_queue : NULL); + hw_fence_client->client_id, _get_queue_type(queue_type), queue, + queue ? queue->va_header : NULL, queue ? 
queue->va_queue : NULL); return; } - hw_fence_get_queue_idx_ptrs(drv_data, queue->va_header, &rd_idx_ptr, &wr_idx_ptr, - &tx_wm_ptr); mb(); /* make sure data is ready before read */ - HWFNC_DBG_DUMP(prio, "%s va:0x%pK rd_idx:%u wr_idx:%u tx_wm:%u q_size_bytes:%u\n", - (queue_type == HW_FENCE_TX_QUEUE) ? "TX QUEUE" : "RX QUEUE", queue->va_queue, - *rd_idx_ptr, *wr_idx_ptr, *tx_wm_ptr, queue->q_size_bytes); + _dump_queue_header(drv_data, prio, queue, hw_fence_client->client_id, queue_type, + &rd_idx_ptr, &wr_idx_ptr, &tx_wm_ptr); queue_entries = queue->q_size_bytes / HW_FENCE_CLIENT_QUEUE_PAYLOAD; for (i = 0; i < queue_entries; i++) { - read_ptr = ((u32 *)queue->va_queue + - (i * (sizeof(struct msm_hw_fence_queue_payload) / sizeof(u32)))); - payload = (struct msm_hw_fence_queue_payload *)read_ptr; - timestamp = (u64)payload->timestamp_lo | ((u64)payload->timestamp_hi << 32); - - HWFNC_DBG_DUMP(prio, - "%s[%d]: hash:%llu ctx:%llu seqno:%llu f:%llu d:%llu err:%u time:%llu\n", - (queue_type == HW_FENCE_TX_QUEUE) ? "tx" : "rx", i, payload->hash, - payload->ctxt_id, payload->seqno, payload->flags, payload->client_data, - payload->error, timestamp); + _dump_queue_payload(prio, queue, i, queue_type); } } @@ -817,28 +854,61 @@ void hw_fence_debug_dump_queues(struct hw_fence_driver_data *drv_data, enum hw_f return; } - HWFNC_DBG_DUMP(prio, "Queues for client %d\n", hw_fence_client->client_id); if (hw_fence_client->queues_num == HW_FENCE_CLIENT_QUEUES) - _dump_queue(drv_data, prio, hw_fence_client, HW_FENCE_RX_QUEUE); - _dump_queue(drv_data, prio, hw_fence_client, HW_FENCE_TX_QUEUE); + _dump_queue(drv_data, prio, hw_fence_client, HW_FENCE_RX_QUEUE - 1); + _dump_queue(drv_data, prio, hw_fence_client, HW_FENCE_TX_QUEUE - 1); } /** - * hw_fence_dbg_dump_queues_wr() - debugfs wr to dump the hw-fences queues. + * hw_fence_dbg_dump_queues_wr() - debugfs wr to control the dump of hw-fences queues. * @file: file handler. * @user_buf: user buffer content for debugfs. 
* @count: size of the user buffer. * @ppos: position offset of the user buffer. * - * This debugfs dumps the hw-fence queues. Takes as input the desired client to dump. - * Dumps to debug msgs the contents of the TX and RX queues for that client, if they exist. + * This debugfs receives as parameter either zero to dump the ctrl queues or the client_id for + * which to dump client queues in the next read of the same debugfs node. */ -static ssize_t hw_fence_dbg_dump_queues_wr(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) +static ssize_t hw_fence_dbg_dump_queues_wr(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) { struct hw_fence_driver_data *drv_data; int client_id; + client_id = _get_debugfs_input_client_with_min(file, user_buf, count, ppos, &drv_data, 0); + if (client_id < 0) + return -EINVAL; + + drv_data->debugfs_data.client_id_rd = client_id; + + return count; +} + +/** + * hw_fence_dbg_dump_queues_rd() - debugfs read to dump ctrl or client queues. + * @file: file handler. + * @user_buf: user buffer content for debugfs. + * @user_buf_size: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs dumps either hw-fence ctrl queues or the client queues of a given client. The user + * can provide zero (to print the ctrl queues) or the client_id of interest by writing to this + * debugfs node (see documentation for the write in 'hw_fence_dbg_dump_queues_wr'). By default, + * dumps the ctrl queues. 
+ */ +static ssize_t hw_fence_dbg_dump_queues_rd(struct file *file, char __user *user_buf, + size_t user_buf_size, loff_t *ppos) +{ + struct hw_fence_driver_data *drv_data; + struct msm_hw_fence_client *hw_fence_client; + struct msm_hw_fence_queue *queue; + u32 entry_size = sizeof(HFENCE_QPAYLOAD_MSG), max_size = SZ_4K; + u32 client_id, queue_entries, queues_num, *rd_idx_ptr, *wr_idx_ptr, *tx_wm_ptr; + char *buf = NULL; + int len = 0; + static u32 index, queue_type; + static bool qhdr_dumped; + if (!file || !file->private_data) { HWFNC_ERR("unexpected data file:0x%pK private_data:0x%pK\n", file, file ? file->private_data : NULL); @@ -846,17 +916,95 @@ static ssize_t hw_fence_dbg_dump_queues_wr(struct file *file, const char __user } drv_data = file->private_data; - client_id = _get_debugfs_input_client(file, user_buf, count, ppos, &drv_data); - if (client_id < 0) - return -EINVAL; + client_id = drv_data->debugfs_data.client_id_rd; + if (client_id == 0) { + queue = &drv_data->ctrl_queues[queue_type]; + queues_num = HW_FENCE_CTRL_QUEUES; + } else { + if (!drv_data->clients[client_id]) { + HWFNC_ERR("client %d not initialized\n", client_id); + return -EINVAL; + } + hw_fence_client = drv_data->clients[client_id]; + queue = &hw_fence_client->queues[queue_type]; + queues_num = hw_fence_client->queues_num; + } + queue_entries = queue->q_size_bytes / HW_FENCE_CLIENT_QUEUE_PAYLOAD; - if (!drv_data->clients[client_id]) { - HWFNC_ERR("client %d not initialized\n", client_id); + if (queue_type >= queues_num) { + HWFNC_DBG_H("no more data client_id:%d q_num:%u q_entries:%u\n", client_id, + queues_num, queue_entries); + queue_type = 0; + index = 0; + return 0; + } + + if (!queue || !queue->va_header || !queue->va_queue) { + HWFNC_ERR("client:%d %s q_ptr:0x%pK qhdr_va:0x%pK q_va:0x%pK uninitialized\n", + client_id, _get_queue_type(queue_type), queue, + queue ? queue->va_header : NULL, queue ? 
queue->va_queue : NULL); return -EINVAL; } - hw_fence_debug_dump_queues(drv_data, HW_FENCE_PRINTK, drv_data->clients[client_id]); - return count; + if (user_buf_size < entry_size) { + HWFNC_ERR("Not enough buff size:%zu to dump entries:%d\n", user_buf_size, + entry_size); + return -EINVAL; + } + + buf = kvzalloc(max_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + if (!qhdr_dumped) { + mb(); /* make sure data is ready before read */ + _dump_queue_header(drv_data, HW_FENCE_INFO, queue, client_id, queue_type, + &rd_idx_ptr, &wr_idx_ptr, &tx_wm_ptr); + len += scnprintf(buf + len, max_size - len, HFENCE_QHDR_MSG, client_id, + _get_queue_type(queue_type), queue->q_size_bytes, *rd_idx_ptr, *wr_idx_ptr, + *tx_wm_ptr, queue->skip_wr_idx ? "true" : "false", queue->rd_wr_idx_start, + queue->rd_wr_idx_factor); + qhdr_dumped = true; + } + + for (; index < queue_entries && len < (max_size - entry_size); index++) { + struct msm_hw_fence_queue_payload *payload; + u64 timestamp; + + payload = _dump_queue_payload(HW_FENCE_INFO, queue, index, queue_type); + + if (!(payload->hash || payload->ctxt_id || payload->seqno || payload->flags || + payload->client_data || payload->error || payload->timestamp_lo || + payload->timestamp_hi || payload->type)) + continue; + + timestamp = (u64)payload->timestamp_lo | ((u64)payload->timestamp_hi << 32); + len += scnprintf(buf + len, max_size - len, HFENCE_QPAYLOAD_MSG, + _get_queue_type(queue_type), index, payload->hash, payload->ctxt_id, + payload->seqno, payload->flags, payload->client_data, payload->error, + timestamp, payload->type); + } + if (index >= queue_entries) { + index = 0; + queue_type++; + qhdr_dumped = false; + } + + if (len <= 0 || len > user_buf_size) { + HWFNC_ERR("len:%d invalid buff size:%zu\n", len, user_buf_size); + len = 0; + goto exit; + } + + if (copy_to_user(user_buf, buf, len)) { + HWFNC_ERR("failed to copy to user!\n"); + len = -EFAULT; + goto exit; + } + *ppos += len; +exit: + kvfree(buf); + return len; } /** @@ 
-875,7 +1023,7 @@ static ssize_t hw_fence_dbg_dump_table_rd(struct file *file, char __user *user_b size_t user_buf_size, loff_t *ppos) { struct hw_fence_driver_data *drv_data; - int entry_size = sizeof(struct msm_hw_fence); + int entry_size = sizeof(HFENCE_TBL_MSG); char *buf = NULL; int len = 0, max_size = SZ_4K; static u32 index, cnt; @@ -1292,6 +1440,7 @@ static const struct file_operations hw_fence_dump_table_fops = { static const struct file_operations hw_fence_dump_queues_fops = { .open = simple_open, .write = hw_fence_dbg_dump_queues_wr, + .read = hw_fence_dbg_dump_queues_rd, }; static const struct file_operations hw_fence_dump_events_fops = { From f9a5eb2f9bf4475a09e715f341df81aaf19613cd Mon Sep 17 00:00:00 2001 From: Grace An Date: Thu, 19 Sep 2024 13:32:27 -0700 Subject: [PATCH 162/166] mm-drivers: hw_fence: avoid error message when ctrl queue is empty The fence controller can send HW Fence Driver back-to-back ctrl queue messages. The signaled_clients_mask is cleared before the ctrl queue is processed, in which case the hw-fence soccp listener thread may process multiple ctrl queue payload messages for "one" signal from fence controller; when the fence controller processes the "next" IPCC signal, the HW Fence Driver will find that the ctrl queue is empty. This change avoids printing an error message for this valid scenario. 
Change-Id: I32662fb5894d1f95c587d8a836855679aed0f637 Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_utils.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 7ae6a489b2..42148cbeae 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -411,7 +411,7 @@ static int _process_ctrl_rx_queue(struct hw_fence_driver_data *drv_data) &drv_data->ctrl_queues[HW_FENCE_RX_QUEUE - 1], &payload); if (read < 0) { HWFNC_DBG_Q("unable to read ctrl rxq\n"); - return read; + return 0; } switch (payload.type) { case HW_FENCE_PAYLOAD_TYPE_2: From 1d3116ed0f24286a685a52556ca675f37ef5c618 Mon Sep 17 00:00:00 2001 From: Akash Gajjar Date: Fri, 18 Oct 2024 16:12:16 +0530 Subject: [PATCH 163/166] mm-drivers: add validation check to avoid deadlock condition Add validation check during spec fence array bind to avoid populating base fence as the user fence. this check avoids deadlock issue observed during enable software signaling. Change-Id: I954570d1d7c1281d0662183f92e5a0a49f591ca5 Signed-off-by: Jayaprakash Madisetty Signed-off-by: Akash Gajjar Signed-off-by: lnxdisplay --- sync_fence/src/qcom_sync_file.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/sync_fence/src/qcom_sync_file.c b/sync_fence/src/qcom_sync_file.c index 6295e04e5c..0bb94a97d3 100644 --- a/sync_fence/src/qcom_sync_file.c +++ b/sync_fence/src/qcom_sync_file.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
*/ @@ -407,6 +407,13 @@ static int spec_sync_bind_array(struct fence_bind_data *sync_bind_info) user_fds[i], sync_bind_info->out_bind_fd); ret = -EINVAL; goto bind_invalid; + } else if (user_fence->context == fence_array->base.context && + user_fence->seqno == fence_array->base.seqno) { + pr_err("invalid spec fence, ufd:%d o_b_fd:%d ctx:%lld seqno:%lld\n", + user_fds[i], sync_bind_info->out_bind_fd, + user_fence->context, user_fence->seqno); + ret = -EINVAL; + goto bind_invalid; } fence_array->fences[i] = user_fence; /* From c96971402c37bf0b54e215fbd747f74af58de86a Mon Sep 17 00:00:00 2001 From: Sailesh Reddy Male Date: Wed, 20 Nov 2024 15:17:07 +0530 Subject: [PATCH 164/166] mm-drivers: hw_fence: Add more client_id specific logs Add changes to log validation client id during synx_wait. This is needed to debug issues related to val clients. Change-Id: Id4eaf0e21a762ee356ffbfb3efcd04065f609206 Signed-off-by: Sailesh Reddy Male Signed-off-by: lnxdisplay --- hw_fence/src/hw_fence_drv_debug.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index 3cb12fe6d7..5831f15912 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -1345,7 +1345,8 @@ static long _process_val_signal(struct hw_fence_driver_data *drv_data, context = fence ? fence->context : 0; seqno = fence ? 
fence->seqno : 0; - + HWFNC_DBG_L("Client_id:%u attempting to process signalled fence:%llu\n", + hw_fence_client->client_id, hash); while (read) { read = hw_fence_read_queue(drv_data, hw_fence_client, &payload, queue_type); if (read < 0) { @@ -1353,8 +1354,8 @@ static long _process_val_signal(struct hw_fence_driver_data *drv_data, hw_fence_client->client_id); break; } - HWFNC_DBG_L("rxq read: hash:%llu, flags:%llu, error:%u\n", - payload.hash, payload.flags, payload.error); + HWFNC_DBG_L("Client_id: %u rxq read: hash:%llu, flags:%llu, error:%u\n", + hw_fence_client->client_id, payload.hash, payload.flags, payload.error); if ((fence && payload.ctxt_id == context && payload.seqno == seqno) || (mask && ((mask & hash) == (mask & payload.hash)))) { *error = payload.error; @@ -1369,9 +1370,10 @@ static long _process_val_signal(struct hw_fence_driver_data *drv_data, } } - HWFNC_ERR("fence received did not match the fence expected\n"); - HWFNC_ERR("received: hash:%llu ctx:%llu seq:%llu expected: hash:%llu ctx:%llu seq:%llu\n", - payload.hash, payload.ctxt_id, payload.seqno, hash, context, seqno); + HWFNC_ERR("fence received: hash:%llu ctx:%llu seq:%llu did not match expected fence\n", + payload.hash, payload.ctxt_id, payload.seqno); + HWFNC_ERR("Client_id:%u fence expected: hash:%llu ctx:%llu seq:%llu\n", + hw_fence_client->client_id, hash, context, seqno); return -EINVAL; } @@ -1389,6 +1391,8 @@ int hw_fence_debug_wait_val(struct hw_fence_driver_data *drv_data, } exp_ktime = ktime_add_ms(ktime_get(), timeout_ms); + HWFNC_DBG_L("Client_id:%u attempting to wait on fence:%llu\n", + hw_fence_client->client_id, hash); while (ret) { do { ret = wait_event_timeout(hw_fence_client->wait_queue, @@ -1399,7 +1403,8 @@ int hw_fence_debug_wait_val(struct hw_fence_driver_data *drv_data, ktime_compare_safe(exp_ktime, cur_ktime) > 0); if (!ret) { - HWFNC_ERR("timed out waiting for the client signal %llu\n", timeout_ms); + HWFNC_ERR("Client_id: %u timed out waiting for the client signal 
%llu\n", + hw_fence_client->client_id, timeout_ms); /* Decrement the refcount that hw_sync_get_fence increments */ dma_fence_put(fence); return -ETIMEDOUT; From 2e58e0a01ef8c9a62631432e9ee391852f2987f1 Mon Sep 17 00:00:00 2001 From: Grace An Date: Thu, 31 Oct 2024 10:29:43 -0700 Subject: [PATCH 165/166] mm-drivers: hw_fence: reduce number of parameters in hw-fence lookup Currently, the _hw_fence_lookup_and_process_range function has greater than eight parameters, which pushes parameters to the stack. Reduce the number of parameters in function calls by splitting apart this function. Change-Id: Ie4f0e21c8b9d79c18148e67d34005c6b10c15fdb Signed-off-by: Grace An Signed-off-by: lnxdisplay --- hw_fence/include/hw_fence_drv_priv.h | 7 - hw_fence/src/hw_fence_drv_priv.c | 421 ++++++++++++--------------- 2 files changed, 184 insertions(+), 244 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 184704c1d2..7a8a04255f 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -132,13 +132,6 @@ #define DMA_FENCE_HASH_TABLE_BIT (12) /* size of table = (1 << 12) = 4096 */ #define DMA_FENCE_HASH_TABLE_SIZE (1 << DMA_FENCE_HASH_TABLE_BIT) -enum hw_fence_lookup_ops { - HW_FENCE_LOOKUP_OP_CREATE = 0x1, - HW_FENCE_LOOKUP_OP_DESTROY, - HW_FENCE_LOOKUP_OP_CREATE_JOIN, - HW_FENCE_LOOKUP_OP_FIND_FENCE -}; - /** * enum hw_fence_client_data_id - Enum with the clients having client_data, an optional * parameter passed from the waiting client and returned diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 8d8c6e82d9..3d09229d78 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -36,6 +36,17 @@ /* number of fences searched for HW Fence import */ #define HW_FENCE_FIND_THRESHOLD 10 +/* + * Iterates through the hw-fence table populating hash and hw_fence pointers accordingly. 
+ * Note: This internally takes the hw-fence lock during iteration so this loop must be + * exited by setting found = true. + */ +#define for_each_hw_fence(drv_data, hfence, hash, ctx, seqno, start, end, i, found) \ + for ((i) = _hw_fence_iterator_init((drv_data), (hfence), (hash), (ctx), (seqno), \ + (start), (end)); \ + ((i) < (end)) && !(found); \ + (i) = _hw_fence_iterator_next((drv_data), (hfence), (hash), (i), (end), (found))) + inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data) { #ifdef HWFENCE_USE_SLEEP_TIMER @@ -953,45 +964,17 @@ void hw_fence_cleanup_client(struct hw_fence_driver_data *drv_data, kfree(hw_fence_client); } -static inline int _calculate_hash(u32 table_total_entries, u64 context, u64 seqno, - u64 step, u64 *hash) +static inline int _calculate_hash(u64 context, u64 seqno, u64 m_size) { - u64 m_size = table_total_entries; - int val = 0; + u64 a_multiplier = HW_FENCE_HASH_A_MULT; + u64 c_multiplier = HW_FENCE_HASH_C_MULT; + u64 b_multiplier = context + (context - 1); /* odd multiplier */ - if (step == 0) { - u64 a_multiplier = HW_FENCE_HASH_A_MULT; - u64 c_multiplier = HW_FENCE_HASH_C_MULT; - u64 b_multiplier = context + (context - 1); /* odd multiplier */ - - /* - * if m, is power of 2, we can optimize with right shift, - * for now we don't do it, to avoid assuming a power of two - */ - *hash = (a_multiplier * seqno * b_multiplier + (c_multiplier * context)) % m_size; - } else { - if (step >= m_size) { - /* - * If we already traversed the whole table, return failure since this means - * there are not available spots, table is either full or full-enough - * that we couldn't find an available spot after traverse the whole table. - * Ideally table shouldn't be so full that we cannot find a value after some - * iterations, so this maximum step size could be optimized to fail earlier. 
- */ - HWFNC_ERR("Fence Table tranversed and no available space!\n"); - val = -EINVAL; - } else { - /* - * Linearly increment the hash value to find next element in the table - * note that this relies in the 'scrambled' data from the original hash - * Also, add a mod division to wrap-around in case that we reached the - * end of the table - */ - *hash = (*hash + 1) % m_size; - } - } - - return val; + /* + * if m, is power of 2, we can optimize with right shift, + * for now we don't do it, to avoid assuming a power of two + */ + return (a_multiplier * seqno * b_multiplier + (c_multiplier * context)) % m_size; } static inline struct msm_hw_fence *_get_hw_fence(u32 table_total_entries, @@ -1007,10 +990,62 @@ static inline struct msm_hw_fence *_get_hw_fence(u32 table_total_entries, return &hw_fences_tbl[hash]; } -static bool _is_hw_fence_free(struct msm_hw_fence *hw_fence, u64 context, u64 seqno) +static int _hw_fence_lookup_next(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence **hw_fence, u64 *hash, u32 init_step, u32 incr, u32 m_size) { - /* If valid is set, the hw fence is not free */ - return hw_fence->valid ? false : true; + *hash = (*hash + incr) % m_size; + *hw_fence = _get_hw_fence(m_size, drv_data->hw_fences_tbl, *hash); + if (!*hw_fence) { + HWFNC_ERR("failed to get hw-fence hash:%llu\n", *hash); + return m_size; + } + GLOBAL_ATOMIC_STORE(drv_data, &(*hw_fence)->lock, 1); + + return init_step + incr; +} + +/* returns initial step value and initializes hash and hw_fence */ +static int _hw_fence_iterator_init(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence **hw_fence, u64 *hash, u64 context, u64 seqno, u32 start_step, + u32 end_step) +{ + u32 m_size; + + if (!drv_data || !hw_fence || !hash || start_step >= end_step || + end_step > drv_data->hw_fences_tbl_cnt) { + HWFNC_ERR("invalid drv_data:0x%pK hwf:0x%pK h:0x%pK start:%u end:%u tbl_size:%u\n", + drv_data, hw_fence, hash, start_step, end_step, + drv_data ? 
drv_data->hw_fences_tbl_cnt : -1); + return end_step; + } + + m_size = drv_data->hw_fences_tbl_cnt; + *hash = _calculate_hash(context, seqno, m_size); + HWFNC_DBG_LUT("ctx:%llu seq:%llu tbl_size:%u start_step:%u initial_hash:%llu\n", context, + seqno, m_size, start_step, *hash); + + return _hw_fence_lookup_next(drv_data, hw_fence, hash, 0, start_step, m_size); +} + +/* returns new step value and populates hash and hw_fence */ +static int _hw_fence_iterator_next(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence **hw_fence, u64 *hash, u32 curr_step, u32 end_step, bool found) +{ + u32 m_size = drv_data->hw_fences_tbl_cnt; + + /* unlock previous entry */ + GLOBAL_ATOMIC_STORE(drv_data, &(*hw_fence)->lock, 0); + if ((curr_step + 1) >= end_step || found) { + HWFNC_DBG_LUT("found:%s step:%d max:%d h:%llu v:%u ctx:%llu seq:%llu flg:0x%llx\n", + found ? "true" : "false", curr_step, end_step, *hash, (*hw_fence)->valid, + (*hw_fence)->ctx_id, (*hw_fence)->seq_id, (*hw_fence)->flags); + return found ? 
curr_step : curr_step + 1; + } + + HWFNC_DBG_LUT("cmp failed resolving collision step:%u max:%u hash:%llu\n", curr_step + 1, + end_step, *hash); + + return _hw_fence_lookup_next(drv_data, hw_fence, hash, curr_step, 1, m_size); } static bool _hw_fence_match(struct msm_hw_fence *hw_fence, u64 context, u64 seqno) @@ -1047,34 +1082,9 @@ static void _cleanup_hw_fence(struct msm_hw_fence *hw_fence) memset(hw_fence->client_data, 0, sizeof(hw_fence->client_data)); } -/* This function must be called with the hw fence lock */ -static int _reserve_hw_fence(struct hw_fence_driver_data *drv_data, - struct msm_hw_fence *hw_fence, u32 client_id, - u64 context, u64 seqno, u32 hash, u32 pending_child_cnt) -{ - _cleanup_hw_fence(hw_fence); - - /* reserve this HW fence */ - hw_fence->valid = 1; - - hw_fence->ctx_id = context; - hw_fence->seq_id = seqno; - hw_fence->flags = 0; /* fence just reserved, there shouldn't be any flags set */ - hw_fence->fence_allocator = client_id; - hw_fence->fence_create_time = hw_fence_get_qtime(drv_data); - /* one released by creating client; one released by FCTL */ - hw_fence->refcount = HW_FENCE_FCTL_REFCOUNT + 1; - - HWFNC_DBG_LUT("Reserved fence client:%d ctx:%llu seq:%llu hash:%u\n", - client_id, context, seqno, hash); - - return 0; -} - /* This function must be called with the hw fence lock */ static int _unreserve_hw_fence(struct hw_fence_driver_data *drv_data, - struct msm_hw_fence *hw_fence, u32 client_id, - u64 context, u64 seqno, u32 hash, u32 pending_child_cnt) + struct msm_hw_fence *hw_fence, u32 hash) { if (hw_fence->refcount & HW_FENCE_HLOS_REFCOUNT_MASK) hw_fence->refcount--; @@ -1089,8 +1099,9 @@ static int _unreserve_hw_fence(struct hw_fence_driver_data *drv_data, hw_fence->valid = 0; } - HWFNC_DBG_LUT("Unreserved fence client:%d ctx:%llu seq:%llu hash:%u refcount:%x\n", - client_id, context, seqno, hash, hw_fence->refcount); + HWFNC_DBG_LUT("Removed ref on fence alloc:%d ctx:%llu seq:%llu refcount:0x%x hash:%u\n", + 
hw_fence->fence_allocator, hw_fence->ctx_id, hw_fence->seq_id, hw_fence->refcount, + hash); return 0; } @@ -1132,14 +1143,14 @@ int hw_fence_destroy_refcount(struct hw_fence_driver_data *drv_data, u64 hash, u } /* This function must be called with the hw fence lock */ -static int _reserve_join_fence(struct hw_fence_driver_data *drv_data, +static int _reserve_hw_fence(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hw_fence, u32 client_id, u64 context, u64 seqno, u32 hash, u32 pending_child_cnt) { _cleanup_hw_fence(hw_fence); /* reserve this HW fence */ - hw_fence->valid = true; + hw_fence->valid = 1; hw_fence->ctx_id = context; hw_fence->seq_id = seqno; @@ -1150,16 +1161,15 @@ static int _reserve_join_fence(struct hw_fence_driver_data *drv_data, hw_fence->pending_child_cnt = pending_child_cnt; - HWFNC_DBG_LUT("Reserved join fence client:%d ctx:%llu seq:%llu hash:%u\n", - client_id, context, seqno, hash); + HWFNC_DBG_LUT("Reserved fence client:%d ctx:%llu seq:%llu pending_child:%u hash:%u\n", + client_id, context, seqno, pending_child_cnt, hash); return 0; } /* This function must be called with the hw fence lock */ -static int _fence_found(struct hw_fence_driver_data *drv_data, - struct msm_hw_fence *hw_fence, u32 client_id, - u64 context, u64 seqno, u32 hash, u32 pending_child_cnt) +static int _fence_found(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hw_fence, + u32 hash) { if ((hw_fence->refcount & HW_FENCE_HLOS_REFCOUNT_MASK) == HW_FENCE_HLOS_REFCOUNT_MASK) return -EINVAL; @@ -1169,176 +1179,123 @@ static int _fence_found(struct hw_fence_driver_data *drv_data, * is done, the refcount needs to be decremented either explicitly by the client or as part * of processing in HW Fence Driver. 
*/ - hw_fence->refcount++; - HWFNC_DBG_LUT("Found fence client:%d ctx:%llu seq:%llu hash:%u ref:0x%x\n", - client_id, context, seqno, hash, hw_fence->refcount); + HWFNC_DBG_LUT("Found fence alloc:%d ctx:%llu seq:%llu refcount:0x%x hash:%u\n", + hw_fence->fence_allocator, hw_fence->ctx_id, hw_fence->seq_id, hw_fence->refcount, + hash); return 0; } -char *_get_op_mode(enum hw_fence_lookup_ops op_code) +struct msm_hw_fence *_hw_fence_lookup_and_create_range(struct hw_fence_driver_data *drv_data, + u32 client_id, u64 context, u64 seqno, u32 pending_child_cnt, u64 *hash, + u32 start_step, u32 end_step, u64 flags) { - switch (op_code) { - case HW_FENCE_LOOKUP_OP_CREATE: - return "CREATE"; - case HW_FENCE_LOOKUP_OP_DESTROY: - return "DESTROY"; - case HW_FENCE_LOOKUP_OP_CREATE_JOIN: - return "CREATE_JOIN"; - case HW_FENCE_LOOKUP_OP_FIND_FENCE: - return "FIND_FENCE"; - default: - return "UNKNOWN"; + struct msm_hw_fence *hw_fence; + bool hw_fence_found; + int ret = 0; + u32 step; + + if (!drv_data || !hash) { + HWFNC_ERR("Invalid input for hw_fence_lookup drv_data:0x%pK hash:0x%pK\n", + drv_data, hash); + return NULL; } - return "UNKNOWN"; + for_each_hw_fence(drv_data, &hw_fence, hash, context, seqno, start_step, end_step, + step, hw_fence_found) { + if (!hw_fence->valid) { + /* Process the hw fence found by the algorithm */ + ret = _reserve_hw_fence(drv_data, hw_fence, client_id, context, seqno, + *hash, pending_child_cnt); + + /* update memory table with processing */ + wmb(); + + HWFNC_DBG_L("client_id:%u ctx:%llu seqno:%llu hash:%llu step:%u\n", + client_id, context, seqno, *hash, step); + + hw_fence_found = true; + } else if (_hw_fence_match(hw_fence, context, seqno)) { + hw_fence_found = true; + if (flags & MSM_HW_FENCE_FLAG_CREATE_SIGNALED) + ret = _fence_found(drv_data, hw_fence, *hash); + else + ret = -EALREADY; + + HWFNC_DBG_L("client_id:%u ctx:%llu seqno:%llu hash:%llu step:%u\n", + client_id, context, seqno, *hash, step); + } + } + + if (ret == -EALREADY) { + 
HWFNC_ERR("can't create hfence w/ same ctx:%llu seq:%llu\n", context, seqno); + return NULL; + } + + /* If we iterated through the whole list and didn't find available fences, return null */ + if (!hw_fence_found || ret) { + HWFNC_DBG_LUT("fail to process create hw_fence ctx:%llu seq:%llu start:%u end:%u\n", + context, seqno, start_step, end_step); + return NULL; + } + + return hw_fence; +} + +struct msm_hw_fence *_hw_fence_lookup_and_create(struct hw_fence_driver_data *drv_data, + u32 client_id, u64 context, u64 seqno, u32 pending_child_cnt, u64 *hash) +{ + return _hw_fence_lookup_and_create_range(drv_data, client_id, context, seqno, + pending_child_cnt, hash, 0, drv_data->hw_fences_tbl_cnt, 0); } struct msm_hw_fence *_hw_fence_lookup_and_process_range(struct hw_fence_driver_data *drv_data, - struct msm_hw_fence *hw_fences_tbl, u64 context, u64 seqno, u32 client_id, - u32 pending_child_cnt, enum hw_fence_lookup_ops op_code, u64 *hash, u64 flags, - u64 start_step, u64 end_step) + u64 context, u64 seqno, u64 *hash, u32 start_step, u32 end_step, + int (*process_fn)(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hfence, + u32 hash)) { - bool (*compare_fnc)(struct msm_hw_fence *hfence, u64 context, u64 seqno); - int (*process_fnc)(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hfence, - u32 client_id, u64 context, u64 seqno, u32 hash, u32 pending); - struct msm_hw_fence *hw_fence = NULL; - u64 step = start_step; + struct msm_hw_fence *hw_fence; + bool hw_fence_found; int ret = 0; - bool hw_fence_found = false; + u32 step; - if (!hash | !drv_data | !hw_fences_tbl) { - HWFNC_ERR("Invalid input for hw_fence_lookup\n"); + if (!drv_data || !hash || !process_fn) { + HWFNC_ERR("Invalid input drv_data:0x%pK hash:0x%pK process_fn:0x%pK\n", + drv_data, hash, process_fn); return NULL; } - /* - * When start_step != 0, the hash is already initialized at the correct value and should - * not be reset. 
- */ - if (!step) - *hash = ~0; - - HWFNC_DBG_LUT("hw_fence_lookup: %d\n", op_code); - - switch (op_code) { - case HW_FENCE_LOOKUP_OP_CREATE: - compare_fnc = &_is_hw_fence_free; - process_fnc = &_reserve_hw_fence; - break; - case HW_FENCE_LOOKUP_OP_DESTROY: - compare_fnc = &_hw_fence_match; - process_fnc = &_unreserve_hw_fence; - break; - case HW_FENCE_LOOKUP_OP_CREATE_JOIN: - compare_fnc = &_is_hw_fence_free; - process_fnc = &_reserve_join_fence; - break; - case HW_FENCE_LOOKUP_OP_FIND_FENCE: - compare_fnc = &_hw_fence_match; - process_fnc = &_fence_found; - break; - default: - HWFNC_ERR("Unknown op code:%d\n", op_code); - return NULL; - } - - while (!hw_fence_found && (step < end_step)) { - - /* Calculate the Hash for the Fence */ - ret = _calculate_hash(drv_data->hw_fence_table_entries, context, seqno, step, hash); - if (ret) { - HWFNC_ERR("error calculating hash ctx:%llu seqno:%llu hash:%llu\n", - context, seqno, *hash); - break; - } - HWFNC_DBG_LUT("calculated hash:%llu [ctx:%llu seqno:%llu]\n", *hash, context, - seqno); - - /* Get element from the table using the hash */ - hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, hw_fences_tbl, *hash); - HWFNC_DBG_LUT("hw_fence_tbl:0x%pK hw_fence:0x%pK, hash:%llu valid:0x%x\n", - hw_fences_tbl, hw_fence, *hash, hw_fence ? 
hw_fence->valid : 0xbad); - if (!hw_fence) { - HWFNC_ERR("bad hw fence ctx:%llu seqno:%llu hash:%llu\n", - context, seqno, *hash); - break; - } - - GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); - - /* compare to either find a free fence or find an allocated fence */ - if (compare_fnc(hw_fence, context, seqno)) { - + for_each_hw_fence(drv_data, &hw_fence, hash, context, seqno, start_step, end_step, step, + hw_fence_found) { + if (_hw_fence_match(hw_fence, context, seqno)) { /* Process the hw fence found by the algorithm */ - if (process_fnc) { - ret = process_fnc(drv_data, hw_fence, client_id, context, seqno, - *hash, pending_child_cnt); - - /* update memory table with processing */ - wmb(); - } - - HWFNC_DBG_L("client_id:%u op:%s ctx:%llu seqno:%llu hash:%llu step:%llu\n", - client_id, _get_op_mode(op_code), context, seqno, *hash, step); - + ret = process_fn(drv_data, hw_fence, *hash); + HWFNC_DBG_L("ctx:%llu seqno:%llu hash:%llu step:%u\n", context, seqno, + *hash, step); hw_fence_found = true; - } else { - if ((op_code == HW_FENCE_LOOKUP_OP_CREATE || - op_code == HW_FENCE_LOOKUP_OP_CREATE_JOIN) && - seqno == hw_fence->seq_id && context == hw_fence->ctx_id) { - if (flags & MSM_HW_FENCE_FLAG_CREATE_SIGNALED) { - /* hw-fence created for importing client */ - ret = _fence_found(drv_data, hw_fence, client_id, context, - seqno, *hash, pending_child_cnt); - hw_fence_found = true; - } else { - ret = -EALREADY; - } - GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); - if (ret == -EALREADY) - HWFNC_ERR("can't create hfence w/ same ctx:%llu seq:%llu\n", - context, seqno); - - break; - } - /* compare can fail if we have a collision, we will linearly resolve it */ - HWFNC_DBG_H("compare failed for hash:%llu [ctx:%llu seqno:%llu]\n", *hash, - context, seqno); } - - GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); - - if (hw_fence_found && ret) - HWFNC_ERR("failed process_func client:%u op:%s ctx:%llu seq:%llu h:%llu\n", - client_id, _get_op_mode(op_code), context, 
seqno, *hash); - - /* Increment step for the next loop */ - step++; } - /* If we iterated through the whole list and didn't find the fence, return null */ - if (!hw_fence_found) { - HWFNC_DBG_LUT("fail to process hw-fence op_code:%d step:%llu\n", op_code, step); - hw_fence = NULL; + /* If we iterated through the whole list and didn't find available fences, return null */ + if (!hw_fence_found || ret) { + HWFNC_DBG_LUT("fail to process create hw_fence ctx:%llu seq:%llu\n", + context, seqno); + return NULL; } - HWFNC_DBG_LUT("lookup:%d hw_fence:%pK ctx:%llu seqno:%llu hash:%llu flags:0x%llx\n", - op_code, hw_fence, context, seqno, *hash, hw_fence ? hw_fence->flags : -1); - return hw_fence; } struct msm_hw_fence *_hw_fence_lookup_and_process(struct hw_fence_driver_data *drv_data, - struct msm_hw_fence *hw_fences_tbl, u64 context, u64 seqno, u32 client_id, - u32 pending_child_cnt, enum hw_fence_lookup_ops op_code, u64 *hash, u64 flags) + u64 context, u64 seqno, u64 *hash, int (*process_fn)(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence *hfence, u32 hash)) { - return _hw_fence_lookup_and_process_range(drv_data, hw_fences_tbl, context, seqno, - client_id, pending_child_cnt, op_code, hash, flags, 0, - drv_data->hw_fence_table_entries); + return _hw_fence_lookup_and_process_range(drv_data, context, seqno, hash, 0, + drv_data->hw_fences_tbl_cnt, process_fn); } + struct dma_fence *hw_dma_fence_init(struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno) { @@ -1569,12 +1526,10 @@ int hw_fence_create(struct hw_fence_driver_data *drv_data, u64 context, u64 seqno, u64 *hash) { u32 client_id = hw_fence_client->client_id; - struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl; int ret = 0; /* allocate hw fence in table */ - if (!_hw_fence_lookup_and_process(drv_data, hw_fences_tbl, - context, seqno, client_id, 0, HW_FENCE_LOOKUP_OP_CREATE, hash, 0)) { + if (!_hw_fence_lookup_and_create(drv_data, client_id, context, seqno, 0, hash)) { 
HWFNC_ERR("Fail to create fence client:%u ctx:%llu seqno:%llu\n", client_id, context, seqno); ret = -EINVAL; @@ -1606,14 +1561,12 @@ int hw_fence_destroy(struct hw_fence_driver_data *drv_data, u64 context, u64 seqno) { u32 client_id = hw_fence_client->client_id; - struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl; int ret = 0; u64 hash; /* decrement refcount on hw-fence */ - if (!_hw_fence_lookup_and_process(drv_data, hw_fences_tbl, - context, seqno, client_id, 0, HW_FENCE_LOOKUP_OP_DESTROY, &hash, 0)) { - HWFNC_ERR("Fail destroying fence client:%u ctx:%llu seqno:%llu\n", + if (!_hw_fence_lookup_and_process(drv_data, context, seqno, &hash, &_unreserve_hw_fence)) { + HWFNC_ERR("Fail removing ref on fence client:%u ctx:%llu seqno:%llu\n", client_id, context, seqno); ret = -EINVAL; } @@ -1719,9 +1672,8 @@ static struct msm_hw_fence *_hw_fence_process_join_fence(struct hw_fence_driver_ if (create) { /* allocate the fence */ - join_fence = _hw_fence_lookup_and_process(drv_data, hw_fences_tbl, context, - seqno, client_id, pending_child_cnt, HW_FENCE_LOOKUP_OP_CREATE_JOIN, hash, - 0); + join_fence = _hw_fence_lookup_and_create(drv_data, client_id, context, + seqno, pending_child_cnt, hash); if (!join_fence) HWFNC_ERR("Fail to create join fence client:%u ctx:%llu seqno:%llu\n", client_id, context, seqno); @@ -1737,13 +1689,11 @@ struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno, u64 *hash) { - struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl; struct msm_hw_fence *hw_fence; u32 client_id = hw_fence_client ? 
hw_fence_client->client_id : ~0; /* find the hw fence */ - hw_fence = _hw_fence_lookup_and_process(drv_data, hw_fences_tbl, context, - seqno, client_id, 0, HW_FENCE_LOOKUP_OP_FIND_FENCE, hash, 0); + hw_fence = _hw_fence_lookup_and_process(drv_data, context, seqno, hash, &_fence_found); if (!hw_fence) HWFNC_ERR("Fail to find hw fence client:%u ctx:%llu seqno:%llu\n", client_id, context, seqno); @@ -2245,12 +2195,11 @@ static void _signal_fence_if_unsignaled(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *_create_signaled_hw_fence(struct hw_fence_driver_data *drv_data, u32 client_id, struct dma_fence *fence, u64 *hash) { - struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl; struct msm_hw_fence *hw_fence; /* create new hw-fence for signaled dma-fence */ - hw_fence = _hw_fence_lookup_and_process(drv_data, hw_fences_tbl, - fence->context, fence->seqno, client_id, 0, HW_FENCE_LOOKUP_OP_CREATE, hash, + hw_fence = _hw_fence_lookup_and_create_range(drv_data, client_id, + fence->context, fence->seqno, 0, hash, 0, drv_data->hw_fences_tbl_cnt, MSM_HW_FENCE_FLAG_CREATE_SIGNALED); if (hw_fence) { _signal_fence_if_unsignaled(drv_data, hw_fence, *hash, fence->error, true); @@ -2270,7 +2219,6 @@ struct msm_hw_fence *hw_fence_find_with_dma_fence(struct hw_fence_driver_data *d bool *is_signaled, bool create) { u32 step, end_step, client_id = hw_fence_client ? hw_fence_client->client_id : 0xff; - struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl; struct msm_hw_fence *hw_fence = NULL; if (!create && dma_fence_is_signaled(fence)) { @@ -2279,12 +2227,11 @@ struct msm_hw_fence *hw_fence_find_with_dma_fence(struct hw_fence_driver_data *d return NULL; } - for (step = 0; step < drv_data->hw_fence_table_entries; step += HW_FENCE_FIND_THRESHOLD) { - end_step = (step + HW_FENCE_FIND_THRESHOLD > drv_data->hw_fence_table_entries) ? 
- drv_data->hw_fence_table_entries : step + HW_FENCE_FIND_THRESHOLD; - hw_fence = _hw_fence_lookup_and_process_range(drv_data, hw_fences_tbl, - fence->context, fence->seqno, client_id, 0, HW_FENCE_LOOKUP_OP_FIND_FENCE, - hash, 0, step, end_step); + for (step = 0; step < drv_data->hw_fences_tbl_cnt; step += HW_FENCE_FIND_THRESHOLD) { + end_step = (step + HW_FENCE_FIND_THRESHOLD > drv_data->hw_fences_tbl_cnt) ? + drv_data->hw_fences_tbl_cnt : step + HW_FENCE_FIND_THRESHOLD; + hw_fence = _hw_fence_lookup_and_process_range(drv_data, fence->context, + fence->seqno, hash, step, end_step, _fence_found); if (hw_fence) { /* successfully found backing hw-fence*/ *is_signaled = false; From 24946353d5ae7b8aa83eebcc3eec0fcd02f0efae Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 6 Nov 2024 16:28:40 -0800 Subject: [PATCH 166/166] mm-drivers: hw_fence: modify hash algorithm to use dma-fence pointer The current hash algorithm cannot support multiple active dma-fences with the same context and sequence number. Use dma-fence pointer address to resolve collisions when context and sequence numbers are matching. Track dma-fence pointer address only in HLOS to avoid security issues. 
Change-Id: Id97e425f32bed94bda7c17d932e0440fa1d245d7 Signed-off-by: Grace An Signed-off-by: lnxdisplay --- hw_fence/include/hw_fence_drv_priv.h | 8 +-- hw_fence/src/hw_fence_drv_debug.c | 4 +- hw_fence/src/hw_fence_drv_interop.c | 2 +- hw_fence/src/hw_fence_drv_priv.c | 75 +++++++++++++++++----------- hw_fence/src/msm_hw_fence.c | 7 +-- 5 files changed, 59 insertions(+), 37 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 7a8a04255f..2097675895 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -381,6 +381,7 @@ struct hw_fence_soccp { * @clients_num: number of supported hw fence clients (configured based on device-tree) * @hw_fences_tbl: pointer to the hw-fences table * @hw_fences_tbl_cnt: number of elements in the hw-fence table + * @hlos_key_tbl: pointer to table of keys tracked by hlos only, same size as the hw-fences table * @events: start address of hw fence debug events * @total_events: total number of hw fence debug events supported * @client_lock_tbl: pointer to the per-client locks table @@ -447,6 +448,7 @@ struct hw_fence_driver_data { /* HW Fences Table VA */ struct msm_hw_fence *hw_fences_tbl; + u64 *hlos_key_tbl; u32 hw_fences_tbl_cnt; /* events */ @@ -632,11 +634,11 @@ void hw_fence_cleanup_client(struct hw_fence_driver_data *drv_data, void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client); int hw_fence_create(struct hw_fence_driver_data *drv_data, - struct msm_hw_fence_client *hw_fence_client, + struct msm_hw_fence_client *hw_fence_client, u64 hlos_key, u64 context, u64 seqno, u64 *hash); int hw_fence_add_callback(struct hw_fence_driver_data *drv_data, struct dma_fence *fence, u64 hash); int hw_fence_destroy(struct hw_fence_driver_data *drv_data, - struct msm_hw_fence_client *hw_fence_client, + struct msm_hw_fence_client *hw_fence_client, u64 hlos_key, u64 context, u64 seqno); int 
hw_fence_destroy_with_hash(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 hash); @@ -668,7 +670,7 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, struct dma_fence *fence, struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno, u64 *hash, u64 client_data); struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data, - struct msm_hw_fence_client *hw_fence_client, + struct msm_hw_fence_client *hw_fence_client, u64 hlos_key, u64 context, u64 seqno, u64 *hash); struct msm_hw_fence *hw_fence_find_with_dma_fence(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, u64 *hash, diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index 5831f15912..ef9cbb7518 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -406,7 +406,7 @@ static ssize_t hw_fence_dbg_tx_and_signal_clients_wr(struct file *file, client_info_src->seqno_cnt++; /* Create hw fence for src client */ - ret = hw_fence_create(drv_data, hw_fence_client, context, seqno, &hash); + ret = hw_fence_create(drv_data, hw_fence_client, context, context, seqno, &hash); if (ret) { HWFNC_ERR("Error creating HW fence\n"); goto exit; @@ -574,7 +574,7 @@ static int dump_single_entry(struct hw_fence_driver_data *drv_data, char *buf, u context = drv_data->debugfs_data.context_rd; seqno = drv_data->debugfs_data.seqno_rd; - hw_fence = msm_hw_fence_find(drv_data, NULL, context, seqno, &hash); + hw_fence = msm_hw_fence_find(drv_data, NULL, context, context, seqno, &hash); if (!hw_fence) { HWFNC_ERR("no valid hfence found for context:%llu seqno:%llu hash:%llu", context, seqno, hash); diff --git a/hw_fence/src/hw_fence_drv_interop.c b/hw_fence/src/hw_fence_drv_interop.c index bc6c2f7250..e165427dba 100644 --- a/hw_fence/src/hw_fence_drv_interop.c +++ b/hw_fence/src/hw_fence_drv_interop.c @@ -190,7 +190,7 @@ int 
hw_fence_interop_create_fence_from_import(struct synx_import_indv_params *pa /* only synx clients can signal synx fences; no one can signal sw dma-fence from fw */ dummy_client.client_id = is_synx ? HW_FENCE_SYNX_FENCE_CLIENT_ID : HW_FENCE_NATIVE_FENCE_CLIENT_ID; - ret = hw_fence_create(hw_fence_drv_data, &dummy_client, fence->context, + ret = hw_fence_create(hw_fence_drv_data, &dummy_client, (u64)fence, fence->context, fence->seqno, &handle); if (ret) { HWFNC_ERR("failed create fence client:%d ctx:%llu seq:%llu is_synx:%s ret:%d\n", diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 3d09229d78..23f868fd0b 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -661,6 +661,10 @@ static int init_hw_fences_table(struct hw_fence_driver_data *drv_data) drv_data->hw_fences_tbl_cnt = drv_data->hw_fences_mem_desc.size / sizeof(struct msm_hw_fence); + drv_data->hlos_key_tbl = kcalloc(drv_data->hw_fences_tbl_cnt, sizeof(u64), GFP_KERNEL); + if (!drv_data->hlos_key_tbl) + return -ENOMEM; + HWFNC_DBG_INIT("hw_fences_table:0x%pK cnt:%u\n", drv_data->hw_fences_tbl, drv_data->hw_fences_tbl_cnt); @@ -1048,9 +1052,11 @@ static int _hw_fence_iterator_next(struct hw_fence_driver_data *drv_data, return _hw_fence_lookup_next(drv_data, hw_fence, hash, curr_step, 1, m_size); } -static bool _hw_fence_match(struct msm_hw_fence *hw_fence, u64 context, u64 seqno) +static bool _hw_fence_match(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hw_fence, + u64 hash, u64 context, u64 seqno, u64 hlos_key) { - return ((hw_fence->ctx_id == context && hw_fence->seq_id == seqno) ? 
true : false); + return (hw_fence->ctx_id == context) && (hw_fence->seq_id == seqno) + && (drv_data->hlos_key_tbl[hash] == hlos_key); } /* clears everything but the 'valid' field */ @@ -1097,6 +1103,13 @@ static int _unreserve_hw_fence(struct hw_fence_driver_data *drv_data, /* unreserve this HW fence */ hw_fence->valid = 0; + + /** + * Note: If last hwfence refcount is removed from fctl then this entry will not be + * cleared. This is okay because the entry will be set to a new value at the time + * of next fence creation. + */ + drv_data->hlos_key_tbl[hash] = 0; } HWFNC_DBG_LUT("Removed ref on fence alloc:%d ctx:%llu seq:%llu refcount:0x%x hash:%u\n", @@ -1145,7 +1158,7 @@ int hw_fence_destroy_refcount(struct hw_fence_driver_data *drv_data, u64 hash, u /* This function must be called with the hw fence lock */ static int _reserve_hw_fence(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hw_fence, u32 client_id, u64 context, - u64 seqno, u32 hash, u32 pending_child_cnt) + u64 seqno, u32 hash, u32 pending_child_cnt, u64 hlos_key) { _cleanup_hw_fence(hw_fence); @@ -1161,6 +1174,8 @@ static int _reserve_hw_fence(struct hw_fence_driver_data *drv_data, hw_fence->pending_child_cnt = pending_child_cnt; + drv_data->hlos_key_tbl[hash] = hlos_key; + HWFNC_DBG_LUT("Reserved fence client:%d ctx:%llu seq:%llu pending_child:%u hash:%u\n", client_id, context, seqno, pending_child_cnt, hash); @@ -1188,7 +1203,7 @@ static int _fence_found(struct hw_fence_driver_data *drv_data, struct msm_hw_fen } struct msm_hw_fence *_hw_fence_lookup_and_create_range(struct hw_fence_driver_data *drv_data, - u32 client_id, u64 context, u64 seqno, u32 pending_child_cnt, u64 *hash, + u32 client_id, u64 hlos_key, u64 context, u64 seqno, u32 pending_child_cnt, u64 *hash, u32 start_step, u32 end_step, u64 flags) { struct msm_hw_fence *hw_fence; @@ -1207,7 +1222,7 @@ struct msm_hw_fence *_hw_fence_lookup_and_create_range(struct hw_fence_driver_da if (!hw_fence->valid) { /* Process the hw fence 
found by the algorithm */ ret = _reserve_hw_fence(drv_data, hw_fence, client_id, context, seqno, - *hash, pending_child_cnt); + *hash, pending_child_cnt, hlos_key); /* update memory table with processing */ wmb(); @@ -1216,7 +1231,7 @@ struct msm_hw_fence *_hw_fence_lookup_and_create_range(struct hw_fence_driver_da client_id, context, seqno, *hash, step); hw_fence_found = true; - } else if (_hw_fence_match(hw_fence, context, seqno)) { + } else if (_hw_fence_match(drv_data, hw_fence, *hash, context, seqno, hlos_key)) { hw_fence_found = true; if (flags & MSM_HW_FENCE_FLAG_CREATE_SIGNALED) ret = _fence_found(drv_data, hw_fence, *hash); @@ -1229,7 +1244,8 @@ struct msm_hw_fence *_hw_fence_lookup_and_create_range(struct hw_fence_driver_da } if (ret == -EALREADY) { - HWFNC_ERR("can't create hfence w/ same ctx:%llu seq:%llu\n", context, seqno); + HWFNC_ERR("can't create hfence w/ same ctx:%llu seq:%llu hlos_key:0x%pK\n", + context, seqno, (context == hlos_key) ? NULL : (void *)hlos_key); return NULL; } @@ -1244,14 +1260,14 @@ struct msm_hw_fence *_hw_fence_lookup_and_create_range(struct hw_fence_driver_da } struct msm_hw_fence *_hw_fence_lookup_and_create(struct hw_fence_driver_data *drv_data, - u32 client_id, u64 context, u64 seqno, u32 pending_child_cnt, u64 *hash) + u32 client_id, u64 hlos_key, u64 context, u64 seqno, u32 pending_child_cnt, u64 *hash) { - return _hw_fence_lookup_and_create_range(drv_data, client_id, context, seqno, + return _hw_fence_lookup_and_create_range(drv_data, client_id, hlos_key, context, seqno, pending_child_cnt, hash, 0, drv_data->hw_fences_tbl_cnt, 0); } struct msm_hw_fence *_hw_fence_lookup_and_process_range(struct hw_fence_driver_data *drv_data, - u64 context, u64 seqno, u64 *hash, u32 start_step, u32 end_step, + u64 hlos_key, u64 context, u64 seqno, u64 *hash, u32 start_step, u32 end_step, int (*process_fn)(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hfence, u32 hash)) { @@ -1268,7 +1284,7 @@ struct msm_hw_fence 
*_hw_fence_lookup_and_process_range(struct hw_fence_driver_d for_each_hw_fence(drv_data, &hw_fence, hash, context, seqno, start_step, end_step, step, hw_fence_found) { - if (_hw_fence_match(hw_fence, context, seqno)) { + if (_hw_fence_match(drv_data, hw_fence, *hash, context, seqno, hlos_key)) { /* Process the hw fence found by the algorithm */ ret = process_fn(drv_data, hw_fence, *hash); HWFNC_DBG_L("ctx:%llu seqno:%llu hash:%llu step:%u\n", context, seqno, @@ -1288,10 +1304,11 @@ struct msm_hw_fence *_hw_fence_lookup_and_process_range(struct hw_fence_driver_d } struct msm_hw_fence *_hw_fence_lookup_and_process(struct hw_fence_driver_data *drv_data, - u64 context, u64 seqno, u64 *hash, int (*process_fn)(struct hw_fence_driver_data *drv_data, - struct msm_hw_fence *hfence, u32 hash)) + u64 hlos_key, u64 context, u64 seqno, u64 *hash, + int (*process_fn)(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hfence, + u32 hash)) { - return _hw_fence_lookup_and_process_range(drv_data, context, seqno, hash, 0, + return _hw_fence_lookup_and_process_range(drv_data, hlos_key, context, seqno, hash, 0, drv_data->hw_fences_tbl_cnt, process_fn); } @@ -1397,7 +1414,7 @@ struct dma_fence *hw_fence_internal_dma_fence_create(struct hw_fence_driver_data return ERR_PTR(-EINVAL); } - ret = hw_fence_create(drv_data, hw_fence_client, context, seqno, hash); + ret = hw_fence_create(drv_data, hw_fence_client, (u64)fence, context, seqno, hash); if (ret) { HWFNC_ERR("failed to back internal dma-fence client:%d ctx:%llu seq:%llu\n", hw_fence_client->client_id, context, seqno); @@ -1522,14 +1539,14 @@ static int hw_fence_dma_fence_table_del(struct hw_fence_driver_data *drv_data, u } int hw_fence_create(struct hw_fence_driver_data *drv_data, - struct msm_hw_fence_client *hw_fence_client, - u64 context, u64 seqno, u64 *hash) + struct msm_hw_fence_client *hw_fence_client, u64 hlos_key, u64 context, + u64 seqno, u64 *hash) { u32 client_id = hw_fence_client->client_id; int ret = 0; /* 
allocate hw fence in table */ - if (!_hw_fence_lookup_and_create(drv_data, client_id, context, seqno, 0, hash)) { + if (!_hw_fence_lookup_and_create(drv_data, client_id, hlos_key, context, seqno, 0, hash)) { HWFNC_ERR("Fail to create fence client:%u ctx:%llu seqno:%llu\n", client_id, context, seqno); ret = -EINVAL; @@ -1557,15 +1574,15 @@ int hw_fence_create(struct hw_fence_driver_data *drv_data, } int hw_fence_destroy(struct hw_fence_driver_data *drv_data, - struct msm_hw_fence_client *hw_fence_client, - u64 context, u64 seqno) + struct msm_hw_fence_client *hw_fence_client, u64 hlos_key, u64 context, u64 seqno) { u32 client_id = hw_fence_client->client_id; int ret = 0; u64 hash; /* decrement refcount on hw-fence */ - if (!_hw_fence_lookup_and_process(drv_data, context, seqno, &hash, &_unreserve_hw_fence)) { + if (!_hw_fence_lookup_and_process(drv_data, hlos_key, context, seqno, &hash, + &_unreserve_hw_fence)) { HWFNC_ERR("Fail removing ref on fence client:%u ctx:%llu seqno:%llu\n", client_id, context, seqno); ret = -EINVAL; @@ -1672,7 +1689,7 @@ static struct msm_hw_fence *_hw_fence_process_join_fence(struct hw_fence_driver_ if (create) { /* allocate the fence */ - join_fence = _hw_fence_lookup_and_create(drv_data, client_id, context, + join_fence = _hw_fence_lookup_and_create(drv_data, client_id, (u64)array, context, seqno, pending_child_cnt, hash); if (!join_fence) HWFNC_ERR("Fail to create join fence client:%u ctx:%llu seqno:%llu\n", @@ -1686,14 +1703,15 @@ static struct msm_hw_fence *_hw_fence_process_join_fence(struct hw_fence_driver_ } struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data, - struct msm_hw_fence_client *hw_fence_client, - u64 context, u64 seqno, u64 *hash) + struct msm_hw_fence_client *hw_fence_client, u64 hlos_key, u64 context, + u64 seqno, u64 *hash) { struct msm_hw_fence *hw_fence; u32 client_id = hw_fence_client ? 
hw_fence_client->client_id : ~0; /* find the hw fence */ - hw_fence = _hw_fence_lookup_and_process(drv_data, context, seqno, hash, &_fence_found); + hw_fence = _hw_fence_lookup_and_process(drv_data, hlos_key, context, seqno, hash, + &_fence_found); if (!hw_fence) HWFNC_ERR("Fail to find hw fence client:%u ctx:%llu seqno:%llu\n", client_id, context, seqno); @@ -2022,7 +2040,8 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, hw_fence = hw_fence_find_with_dma_fence(drv_data, hw_fence_client, fence, hash, &is_signaled, true); else - hw_fence = msm_hw_fence_find(drv_data, hw_fence_client, context, seqno, hash); + hw_fence = msm_hw_fence_find(drv_data, hw_fence_client, context, context, seqno, + hash); if (!hw_fence) { HWFNC_ERR("Cannot find fence!\n"); return -EINVAL; @@ -2198,7 +2217,7 @@ struct msm_hw_fence *_create_signaled_hw_fence(struct hw_fence_driver_data *drv_ struct msm_hw_fence *hw_fence; /* create new hw-fence for signaled dma-fence */ - hw_fence = _hw_fence_lookup_and_create_range(drv_data, client_id, + hw_fence = _hw_fence_lookup_and_create_range(drv_data, client_id, (u64)fence, fence->context, fence->seqno, 0, hash, 0, drv_data->hw_fences_tbl_cnt, MSM_HW_FENCE_FLAG_CREATE_SIGNALED); if (hw_fence) { @@ -2230,7 +2249,7 @@ struct msm_hw_fence *hw_fence_find_with_dma_fence(struct hw_fence_driver_data *d for (step = 0; step < drv_data->hw_fences_tbl_cnt; step += HW_FENCE_FIND_THRESHOLD) { end_step = (step + HW_FENCE_FIND_THRESHOLD > drv_data->hw_fences_tbl_cnt) ? 
drv_data->hw_fences_tbl_cnt : step + HW_FENCE_FIND_THRESHOLD; - hw_fence = _hw_fence_lookup_and_process_range(drv_data, fence->context, + hw_fence = _hw_fence_lookup_and_process_range(drv_data, (u64)fence, fence->context, fence->seqno, hash, step, end_step, _fence_found); if (hw_fence) { /* successfully found backing hw-fence*/ diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 85d70561e3..93d45886b5 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -291,7 +291,7 @@ int msm_hw_fence_create(void *client_handle, } /* Create the HW Fence, i.e. add entry in the Global Table for this Fence */ - ret = hw_fence_create(hw_fence_drv_data, hw_fence_client, fence->context, + ret = hw_fence_create(hw_fence_drv_data, hw_fence_client, (u64)fence, fence->context, fence->seqno, params->handle); if (ret) { HWFNC_ERR("Error creating HW fence\n"); @@ -358,7 +358,7 @@ int msm_hw_fence_destroy(void *client_handle, } /* Destroy the HW Fence, i.e. remove entry in the Global Table for the Fence */ - ret = hw_fence_destroy(hw_fence_drv_data, hw_fence_client, + ret = hw_fence_destroy(hw_fence_drv_data, hw_fence_client, (u64)fence, fence->context, fence->seqno); if (ret) { HWFNC_ERR("Error destroying the HW fence\n"); @@ -758,7 +758,7 @@ int msm_hw_fence_dump_fence(void *client_handle, struct dma_fence *fence) } hw_fence_client = (struct msm_hw_fence_client *)client_handle; - hw_fence = msm_hw_fence_find(hw_fence_drv_data, hw_fence_client, fence->context, + hw_fence = msm_hw_fence_find(hw_fence_drv_data, hw_fence_client, (u64)fence, fence->context, fence->seqno, &hash); if (!hw_fence) { HWFNC_ERR("failed to find hw-fence client_id:%d fence:0x%pK ctx:%llu seqno:%llu\n", @@ -908,6 +908,7 @@ static int msm_hw_fence_remove(struct platform_device *pdev) /* free memory allocations as part of hw_fence_drv_data */ kfree(hw_fence_drv_data->ipc_clients_table); kfree(hw_fence_drv_data->hw_fence_client_queue_size); + 
kfree(hw_fence_drv_data->hlos_key_tbl); if (hw_fence_drv_data->cpu_addr_cookie) dma_free_attrs(hw_fence_drv_data->dev, hw_fence_drv_data->size, hw_fence_drv_data->cpu_addr_cookie, hw_fence_drv_data->res.start,