From 09be32bc717182346c1a9ec723c7893875d38900 Mon Sep 17 00:00:00 2001 From: Amol Jadi Date: Wed, 28 Sep 2022 18:05:59 -0700 Subject: [PATCH 02/35] mm-sys-kernel: build framework files makefiles for android builds Change-Id: Ideef4c6424278c62db27b44a542694197fa8018e Signed-off-by: Amol Jadi --- ubwcp_kernel_product_board.mk | 0 ubwcp_kernel_vendor_board.mk | 0 2 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 ubwcp_kernel_product_board.mk create mode 100644 ubwcp_kernel_vendor_board.mk diff --git a/ubwcp_kernel_product_board.mk b/ubwcp_kernel_product_board.mk new file mode 100644 index 0000000000..e69de29bb2 diff --git a/ubwcp_kernel_vendor_board.mk b/ubwcp_kernel_vendor_board.mk new file mode 100644 index 0000000000..e69de29bb2 From 00184485c7a519e2fee2af5ff9c1f0804190fee3 Mon Sep 17 00:00:00 2001 From: Amol Jadi Date: Fri, 23 Sep 2022 13:44:39 -0700 Subject: [PATCH 03/35] ubwcp: driver initial version Implements driver to interface with ubwcp hw block Change-Id: I092e45acfedd01978c383bc596e48da23f3e873e Signed-off-by: Amol Jadi Signed-off-by: Liam Mark --- ubwcp/Android.bp | 36 + ubwcp/Android.mk | 10 + ubwcp/Kbuild | 4 + ubwcp/Makefile | 19 + ubwcp/include/uapi/ubwcp_ioctl.h | 118 ++ ubwcp/ubwcp.c | 2487 ++++++++++++++++++++++++++++++ ubwcp/ubwcp.h | 65 + ubwcp/ubwcp_hw.c | 360 +++++ ubwcp/ubwcp_hw.h | 73 + ubwcp/ubwcp_kernel_headers.py | 94 ++ ubwcp_kernel_product_board.mk | 2 + ubwcp_kernel_vendor_board.mk | 2 + 12 files changed, 3270 insertions(+) create mode 100644 ubwcp/Android.bp create mode 100644 ubwcp/Android.mk create mode 100644 ubwcp/Kbuild create mode 100644 ubwcp/Makefile create mode 100644 ubwcp/include/uapi/ubwcp_ioctl.h create mode 100644 ubwcp/ubwcp.c create mode 100644 ubwcp/ubwcp.h create mode 100644 ubwcp/ubwcp_hw.c create mode 100644 ubwcp/ubwcp_hw.h create mode 100644 ubwcp/ubwcp_kernel_headers.py diff --git a/ubwcp/Android.bp b/ubwcp/Android.bp new file mode 100644 index 0000000000..c51f9a23cf --- /dev/null +++ 
b/ubwcp/Android.bp @@ -0,0 +1,36 @@ +headers_src = [ + "include/uapi/*.h", +] + +ubwcp_headers_out = [ + "ubwcp_ioctl.h", +] + +ubwcp_kernel_headers_verbose = "--verbose " +genrule { + name: "qti_generate_ubwcp_kernel_headers", + tools: [ + "headers_install.sh", + "unifdef" + ], + tool_files: [ + "ubwcp_kernel_headers.py", + ], + srcs: headers_src, + cmd: "python3 $(location ubwcp_kernel_headers.py) " + + ubwcp_kernel_headers_verbose + + "--header_arch arm64 " + + "--gen_dir $(genDir) " + + "--ubwcp_include_uapi $(locations include/uapi/*.h) " + + "--unifdef $(location unifdef) " + + "--headers_install $(location headers_install.sh)", + out: ubwcp_headers_out, +} + +cc_library_headers { + name: "qti_ubwcp_kernel_headers", + generated_headers: ["qti_generate_ubwcp_kernel_headers"], + export_generated_headers: ["qti_generate_ubwcp_kernel_headers"], + vendor: true, + recovery_available: true +} diff --git a/ubwcp/Android.mk b/ubwcp/Android.mk new file mode 100644 index 0000000000..0f9256867d --- /dev/null +++ b/ubwcp/Android.mk @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-only + +LOCAL_PATH := $(call my-dir) + +include $(CLEAR_VARS) +# For incremental compilation +LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/*) +LOCAL_MODULE := ubwcpx.ko +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) +include $(DLKM_DIR)/Build_external_kernelmodule.mk diff --git a/ubwcp/Kbuild b/ubwcp/Kbuild new file mode 100644 index 0000000000..2b69d972d6 --- /dev/null +++ b/ubwcp/Kbuild @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only + +ubwcpx-objs := ubwcp.o ubwcp_hw.o +obj-m += ubwcpx.o diff --git a/ubwcp/Makefile b/ubwcp/Makefile new file mode 100644 index 0000000000..6b590dea7a --- /dev/null +++ b/ubwcp/Makefile @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0-only + +#ifeq ($(KP_MODULE_ROOT),) +#KP_MODULE_ROOT=$(KERNEL_SRC)/$(M) +#endif + +#KBUILD_OPTIONS+=KBUILD_DTC_INCLUDE=$(KP_MODULE_ROOT) + +# Kbuild assumes devicetree source lives in arch/arm64/boot/dts, but perhaps it +# 
lives in some other directory in your project. Specify it with KBUILD_EXTMOD_DTS +# KBUILD_OPTIONS+=KBUILD_EXTMOD_DTS=camera + +all: modules # dtbs + +clean: + $(MAKE) -C $(KERNEL_SRC) M=$(M) clean + +%: + $(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS) diff --git a/ubwcp/include/uapi/ubwcp_ioctl.h b/ubwcp/include/uapi/ubwcp_ioctl.h new file mode 100644 index 0000000000..7a13ab54c5 --- /dev/null +++ b/ubwcp/include/uapi/ubwcp_ioctl.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __UBWCP_IOCTL_H_ +#define __UBWCP_IOCTL_H_ + +#include +#include + +#define UBWCP_IOCTL_SET_BUF_ATTR _IOW('U', 1, struct ubwcp_ioctl_buffer_attrs) +#define UBWCP_IOCTL_GET_HW_VER _IOR('U', 2, struct ubwcp_ioctl_hw_version) + + +enum ubwcp_image_format { + UBWCP_LINEAR = 0, + UBWCP_RGBA8888, + UBWCP_NV12, + UBWCP_NV12_Y, + UBWCP_NV12_UV, + UBWCP_NV124R, + UBWCP_NV124R_Y, + UBWCP_NV124R_UV, + UBWCP_TP10, + UBWCP_TP10_Y, + UBWCP_TP10_UV, + UBWCP_P010, + UBWCP_P010_Y, + UBWCP_P010_UV, + UBWCP_P016, + UBWCP_P016_Y, + UBWCP_P016_UV, +}; + +enum ubwcp_compression_type { + UBWCP_COMPRESSION_LOSSLESS = 0, +}; + +enum ubwcp_subsample { + UBWCP_SUBSAMPLE_4_2_0 = 0, +}; + +#define UBWCP_SUBSYSTEM_TARGET_CPU (1 << 0) + +/** + * @image_format: image format + * @major_ubwc_ver: set to 0. This is not HW version. + * @minor_ubwc_ver: set to 0. This is not HW version. + * @compression_type: only lossless is supported. 
+ * @lossy_params: set to 0 + * @width: image width (pixels) + * @height: image height (pixels) + * @stride: image stride (bytes) + * @scanlines: number of scanlines + * @planar_padding: padding between Y and UV planes (bytes) + * @subsample: only 4:2:0 is supported + * @sub_system_target: only CPU is supported + * @y_offset: set to 0 + * @batch_size: set to 1 + * + * All pad[x] and unused[x] fields must be set to 0 + */ +struct ubwcp_buffer_attrs { + + __u16 image_format; /* enum ubwcp_image_format */ + __u16 major_ubwc_ver; /* per-buffer version: must be set to 0 */ + __u16 minor_ubwc_ver; /* per-buffer version: must be set to 0 */ + __u16 compression_type; /* enum ubwcp_compression_type */ + + __u64 lossy_params; /* must be set to 0 */ + + __u32 width; + __u32 height; + __u32 stride; + __u32 scanlines; + + __u32 planar_padding; + __u32 subsample; /* enum enum ubwcp_subsample */ + __u32 sub_system_target;/* bit mask: UBWCP_SUBSYSTEM_TARGET_XXX */ + __u32 y_offset; /* must be set to 0 */ + + __u32 batch_size; /* only size supported: 1 */ + __u32 unused1; + + __u32 unused2; + __u32 unused3; + __u32 unused4; + __u32 unused5; + + __u32 unused6; + __u32 unused7; + __u32 unused8; + __u32 unused9; +}; + +/** + * @fd: dma_buf file descriptor for the buffer whose + * attributes are specified + * @attr: ubwcp buffer attributes + */ +struct ubwcp_ioctl_buffer_attrs { + __u32 fd; + __u32 pad; + struct ubwcp_buffer_attrs attr; +}; + +/** + * ubwcp hardware version + * @major: major version + * @minor: minor version + */ +struct ubwcp_ioctl_hw_version { + __u32 major; + __u32 minor; +}; + +#endif /* __UBWCP_IOCTL_H_ */ diff --git a/ubwcp/ubwcp.c b/ubwcp/ubwcp.c new file mode 100644 index 0000000000..d2edfea97d --- /dev/null +++ b/ubwcp/ubwcp.c @@ -0,0 +1,2487 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +MODULE_IMPORT_NS(DMA_BUF); + +#include "ubwcp.h" +#include "ubwcp_hw.h" +#include "include/uapi/ubwcp_ioctl.h" + +#define UBWCP_NUM_DEVICES 1 +#define UBWCP_DEVICE_NAME "ubwcp" + +#define UBWCP_BUFFER_DESC_OFFSET 64 +#define UBWCP_BUFFER_DESC_COUNT 256 + + +#define CACHE_ADDR(x) ((x) >> 6) +#define PAGE_ADDR(x) ((x) >> 12) +#define UBWCP_ALIGN(_x, _y) ((((_x) + (_y) - 1)/(_y))*(_y)) + + +//#define DBG(fmt, args...) +//#define DBG_BUF_ATTR(fmt, args...) +#define DBG_BUF_ATTR(fmt, args...) do { if (ubwcp_debug_trace_enable) \ + pr_err("ubwcp: %s(): " fmt "\n", __func__, ##args); \ + } while (0) +#define DBG(fmt, args...) do { if (ubwcp_debug_trace_enable) \ + pr_err("ubwcp: %s(): " fmt "\n", __func__, ##args); \ + } while (0) +#define ERR(fmt, args...) pr_err("ubwcp: %s(): ~~~ERROR~~~: " fmt "\n", __func__, ##args) + +#define FENTRY() DBG("ubwcp: %s()", __func__) + + +#define META_DATA_PITCH_ALIGN 64 +#define META_DATA_HEIGHT_ALIGN 16 +#define META_DATA_SIZE_ALIGN 4096 +#define PIXEL_DATA_SIZE_ALIGN 4096 + +struct ubwcp_desc { + int idx; + void *ptr; +}; + +/* TBD: confirm size of width/height */ +struct ubwcp_dimension { + u16 width; + u16 height; +}; + +struct ubwcp_plane_info { + u16 pixel_bytes; + u16 per_pixel; + struct ubwcp_dimension tilesize_p; /* pixels */ + struct ubwcp_dimension macrotilesize_p; /* pixels */ +}; + +struct ubwcp_image_format_info { + u16 planes; + struct ubwcp_plane_info p_info[2]; +}; + +enum ubwcp_std_image_format { + RGBA = 0, + NV12 = 1, + NV124R = 2, + P010 = 3, + TP10 = 4, + P016 = 5, + INFO_FORMAT_LIST_SIZE, + STD_IMAGE_FORMAT_INVALID = 0xFF +}; + +struct ubwcp_driver { + /* cdev related */ + dev_t devt; + struct class *dev_class; //sysfs dev class + struct device *dev_sys; //sysfs dev + struct cdev cdev; //char dev + 
+ /* debugfs */ + struct dentry *debugfs_root; + + /* ubwcp devices */ + struct device *dev; //ubwcp device + struct device *dev_desc_cb; //smmu dev for descriptors + struct device *dev_buf_cb; //smmu dev for ubwcp buffers + + void __iomem *base; //ubwcp base address + struct regulator *vdd; + + /* interrupts */ + int irq_range_ck_rd; + int irq_range_ck_wr; + int irq_encode; + int irq_decode; + + /* ula address pool */ + u64 ula_pool_base; + u64 ula_pool_size; + struct gen_pool *ula_pool; + + configure_mmap mmap_config_fptr; + + /* HW version */ + u32 hw_ver_major; + u32 hw_ver_minor; + + /* keep track of all buffers. hash table index'ed using dma_buf ptr. + * 2**8 = 256 hash values + */ + DECLARE_HASHTABLE(buf_table, 8); + + /* buffer descriptor */ + void *buffer_desc_base; /* CPU address */ + dma_addr_t buffer_desc_dma_handle; /* dma address */ + size_t buffer_desc_size; + struct ubwcp_desc desc_list[UBWCP_BUFFER_DESC_COUNT]; + + struct ubwcp_image_format_info format_info[INFO_FORMAT_LIST_SIZE]; + + struct mutex desc_lock; /* allocate/free descriptors */ + struct mutex buf_table_lock; /* add/remove dma_buf into list of managed bufffers */ + struct mutex ula_lock; /* allocate/free ula */ + struct mutex ubwcp_flush_lock; /* ubwcp flush */ + struct mutex hw_range_ck_lock; /* range ck */ +}; + +struct ubwcp_buf { + struct hlist_node hnode; + struct ubwcp_driver *ubwcp; + struct ubwcp_buffer_attrs buf_attr; + bool perm; + struct ubwcp_desc *desc; + bool buf_attr_set; + bool locked; + enum dma_data_direction lock_dir; + int lock_count; + + /* dma_buf info */ + struct dma_buf *dma_buf; + struct dma_buf_attachment *attachment; + struct sg_table *sgt; + + /* ula info */ + phys_addr_t ula_pa; + size_t ula_size; + + /* meta metadata */ + struct ubwcp_hw_meta_metadata mmdata; + struct mutex lock; +}; + + +static struct ubwcp_driver *me; +static int error_print_count; +u32 ubwcp_debug_trace_enable; + +static struct ubwcp_driver *ubwcp_get_driver(void) +{ + if (!me) + WARN(1, 
"ubwcp: driver ptr requested but driver not initialized"); + + return me; +} + +static void image_format_init(struct ubwcp_driver *ubwcp) +{ /* planes, bytes/p, Tp , MTp */ + ubwcp->format_info[RGBA] = (struct ubwcp_image_format_info) + {1, {{4, 1, {16, 4}, {64, 16}}}}; + ubwcp->format_info[NV12] = (struct ubwcp_image_format_info) + {2, {{1, 1, {32, 8}, {128, 32}}, + {2, 1, {16, 8}, { 64, 32}}}}; + ubwcp->format_info[NV124R] = (struct ubwcp_image_format_info) + {2, {{1, 1, {64, 4}, {256, 16}}, + {2, 1, {32, 4}, {128, 16}}}}; + ubwcp->format_info[P010] = (struct ubwcp_image_format_info) + {2, {{2, 1, {32, 4}, {128, 16}}, + {4, 1, {16, 4}, { 64, 16}}}}; + ubwcp->format_info[TP10] = (struct ubwcp_image_format_info) + {2, {{4, 3, {48, 4}, {192, 16}}, + {8, 3, {24, 4}, { 96, 16}}}}; + ubwcp->format_info[P016] = (struct ubwcp_image_format_info) + {2, {{2, 1, {32, 4}, {128, 16}}, + {4, 1, {16, 4}, { 64, 16}}}}; +} + +static void ubwcp_buf_desc_list_init(struct ubwcp_driver *ubwcp) +{ + int idx; + struct ubwcp_desc *desc_list = ubwcp->desc_list; + + for (idx = 0; idx < UBWCP_BUFFER_DESC_COUNT; idx++) { + desc_list[idx].idx = -1; + desc_list[idx].ptr = NULL; + } +} + +/* UBWCP Power control */ +static int ubwcp_power(struct ubwcp_driver *ubwcp, bool enable) +{ + int ret = 0; + + if (enable) { + ret = regulator_enable(ubwcp->vdd); + if (ret < 0) { + ERR("regulator_enable failed: %d", ret); + ret = -1; + } else { + DBG("regulator_enable() success"); + } + } else { + ret = regulator_disable(ubwcp->vdd); + if (ret < 0) { + ERR("regulator_disable failed: %d", ret); + ret = -1; + } else { + DBG("regulator_disable() success"); + } + } + return ret; +} + + +static int ubwcp_flush(struct ubwcp_driver *ubwcp) +{ + int ret = 0; + + mutex_lock(&ubwcp->ubwcp_flush_lock); + ret = ubwcp_hw_flush(ubwcp->base); + mutex_unlock(&ubwcp->ubwcp_flush_lock); + if (ret != 0) + WARN(1, "ubwcp_hw_flush() failed!"); + + return ret; +} + + +/* get dma_buf ptr for the given dma_buf fd */ +struct 
dma_buf *ubwcp_dma_buf_fd_to_dma_buf(int dma_buf_fd) +{ + struct dma_buf *dmabuf; + + /* TBD: dma_buf_get() results in taking ref to buf and it won't ever get + * free'ed until ref count goes to 0. So we must reduce the ref count + * immediately after we find our corresponding ubwcp_buf. + */ + dmabuf = dma_buf_get(dma_buf_fd); + if (IS_ERR(dmabuf)) { + ERR("dmabuf ptr not found for dma_buf_fd = %d", dma_buf_fd); + return NULL; + } + + dma_buf_put(dmabuf); + + return dmabuf; +} +EXPORT_SYMBOL(ubwcp_dma_buf_fd_to_dma_buf); + + +/* get ubwcp_buf corresponding to the given dma_buf */ +static struct ubwcp_buf *dma_buf_to_ubwcp_buf(struct dma_buf *dmabuf) +{ + struct ubwcp_buf *buf = NULL; + struct ubwcp_driver *ubwcp = ubwcp_get_driver(); + + if (!dmabuf || !ubwcp) + return NULL; + + mutex_lock(&ubwcp->buf_table_lock); + /* look up ubwcp_buf corresponding to this dma_buf */ + hash_for_each_possible(ubwcp->buf_table, buf, hnode, (u64)dmabuf) { + if (buf->dma_buf == dmabuf) + break; + } + mutex_unlock(&ubwcp->buf_table_lock); + + return buf; +} + + +/* return ubwcp hardware version */ +int ubwcp_get_hw_version(struct ubwcp_ioctl_hw_version *ver) +{ + struct ubwcp_driver *ubwcp; + + FENTRY(); + + if (!ver) { + ERR("invalid version ptr"); + return -EINVAL; + } + + ubwcp = ubwcp_get_driver(); + if (!ubwcp) + return -1; + + ver->major = ubwcp->hw_ver_major; + ver->minor = ubwcp->hw_ver_minor; + return 0; +} +EXPORT_SYMBOL(ubwcp_get_hw_version); + +/** + * + * Initialize ubwcp buffer for the given dma_buf. This + * initializes ubwcp internal data structures and possibly hw to + * use ubwcp for this buffer. 
+ * + * @param dmabuf : ptr to the buffer to be configured for ubwcp + * + * @return int : 0 on success, otherwise error code + */ +static int ubwcp_init_buffer(struct dma_buf *dmabuf) +{ + int ret = 0; + int nid; + struct ubwcp_buf *buf; + struct ubwcp_driver *ubwcp = ubwcp_get_driver(); + + FENTRY(); + + if (!ubwcp) + return -1; + + if (!dmabuf) { + ERR("NULL dmabuf input ptr"); + return -EINVAL; + } + + if (dma_buf_to_ubwcp_buf(dmabuf)) { + ERR("dma_buf already initialized for ubwcp"); + return -EEXIST; + } + + buf = kzalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) { + ERR("failed to alloc for new ubwcp_buf"); + return -ENOMEM; + } + + mutex_init(&buf->lock); + buf->dma_buf = dmabuf; + buf->ubwcp = ubwcp; + + mutex_lock(&ubwcp->buf_table_lock); + if (hash_empty(ubwcp->buf_table)) { + + ret = ubwcp_power(ubwcp, true); + if (ret) + goto err_power_on; + + nid = memory_add_physaddr_to_nid(ubwcp->ula_pool_base); + DBG("calling add_memory()..."); + ret = add_memory(nid, ubwcp->ula_pool_base, ubwcp->ula_pool_size, MHP_NONE); + if (ret) { + ERR("add_memory() failed st:0x%lx sz:0x%lx err: %d", + ubwcp->ula_pool_base, + ubwcp->ula_pool_size, + ret); + goto err_add_memory; + } else { + DBG("add_memory() ula_pool_base:0x%llx, size:0x%zx, kernel addr:0x%p", + ubwcp->ula_pool_base, + ubwcp->ula_pool_size, + page_to_virt(pfn_to_page(PFN_DOWN(ubwcp->ula_pool_base)))); + } + } + hash_add(ubwcp->buf_table, &buf->hnode, (u64)buf->dma_buf); + mutex_unlock(&ubwcp->buf_table_lock); + return ret; + +err_add_memory: + ubwcp_power(ubwcp, false); +err_power_on: + mutex_unlock(&ubwcp->buf_table_lock); + kfree(buf); + if (!ret) + ret = -1; + return ret; +} + +static void dump_attributes(struct ubwcp_buffer_attrs *attr) +{ + DBG_BUF_ATTR(""); + DBG_BUF_ATTR("image_format: %d", attr->image_format); + DBG_BUF_ATTR("major_ubwc_ver: %d", attr->major_ubwc_ver); + DBG_BUF_ATTR("minor_ubwc_ver: %d", attr->minor_ubwc_ver); + DBG_BUF_ATTR("compression_type: %d", attr->compression_type); + 
DBG_BUF_ATTR("lossy_params: %llu", attr->lossy_params); + DBG_BUF_ATTR("width: %d", attr->width); + DBG_BUF_ATTR("height: %d", attr->height); + DBG_BUF_ATTR("stride: %d", attr->stride); + DBG_BUF_ATTR("scanlines: %d", attr->scanlines); + DBG_BUF_ATTR("planar_padding: %d", attr->planar_padding); + DBG_BUF_ATTR("subsample: %d", attr->subsample); + DBG_BUF_ATTR("sub_system_target: %d", attr->sub_system_target); + DBG_BUF_ATTR("y_offset: %d", attr->y_offset); + DBG_BUF_ATTR("batch_size: %d", attr->batch_size); + DBG_BUF_ATTR(""); +} + +/* validate buffer attributes */ +static bool ubwcp_buf_attrs_valid(struct ubwcp_buffer_attrs *attr) +{ + bool valid_format; + + switch (attr->image_format) { + case UBWCP_LINEAR: + case UBWCP_RGBA8888: + case UBWCP_NV12: + case UBWCP_NV12_Y: + case UBWCP_NV12_UV: + case UBWCP_NV124R: + case UBWCP_NV124R_Y: + case UBWCP_NV124R_UV: + case UBWCP_TP10: + case UBWCP_TP10_Y: + case UBWCP_TP10_UV: + case UBWCP_P010: + case UBWCP_P010_Y: + case UBWCP_P010_UV: + case UBWCP_P016: + case UBWCP_P016_Y: + case UBWCP_P016_UV: + valid_format = true; + break; + default: + valid_format = false; + } + + if (!valid_format) { + ERR("invalid image format: %d", attr->image_format); + goto err; + } + + if (attr->major_ubwc_ver || attr->minor_ubwc_ver) { + ERR("major/minor ubwc ver must be 0. major: %d minor: %d", + attr->major_ubwc_ver, attr->minor_ubwc_ver); + goto err; + } + + if (attr->compression_type != UBWCP_COMPRESSION_LOSSLESS) { + ERR("compression_type is not valid: %d", + attr->compression_type); + goto err; + } + + if (attr->lossy_params != 0) { + ERR("lossy_params is not valid: %d", attr->lossy_params); + goto err; + } + + //TBD: some upper limit for width? + if (attr->width > 10*1024) { + ERR("width is invalid (above upper limit): %d", attr->width); + goto err; + } + + //TBD: some upper limit for height? 
+ if (attr->height > 10*1024) { + ERR("height is invalid (above upper limit): %d", attr->height); + goto err; + } + + + /* TBD: what's the upper limit for stride? 8K is likely too high. */ + if (!IS_ALIGNED(attr->stride, 64) || + (attr->stride < attr->width) || + (attr->stride > 4*8192)) { + ERR("stride is not valid (aligned to 64 and <= 8192): %d", + attr->stride); + goto err; + } + + /* TBD: currently assume height + 10. Replace 10 with right num from camera. */ + if ((attr->scanlines < attr->height) || + (attr->scanlines > attr->height + 10)) { + ERR("scanlines is not valid - height: %d scanlines: %d", + attr->height, attr->scanlines); + goto err; + } + + if (attr->planar_padding > 4096) { + ERR("planar_padding is not valid. (<= 4096): %d", + attr->planar_padding); + goto err; + } + + if (attr->subsample != UBWCP_SUBSAMPLE_4_2_0) { + ERR("subsample is not valid: %d", attr->subsample); + goto err; + } + + if (attr->sub_system_target & ~UBWCP_SUBSYSTEM_TARGET_CPU) { + ERR("sub_system_target other that CPU is not supported: %d", + attr->sub_system_target); + goto err; + } + + if (!(attr->sub_system_target & UBWCP_SUBSYSTEM_TARGET_CPU)) { + ERR("sub_system_target is not set to CPU: %d", + attr->sub_system_target); + goto err; + } + + if (attr->y_offset != 0) { + ERR("y_offset is not valid: %d", attr->y_offset); + goto err; + } + + if (attr->batch_size != 1) { + ERR("batch_size is not valid: %d", attr->batch_size); + goto err; + } + + dump_attributes(attr); + return true; +err: + dump_attributes(attr); + return false; +} + + +/* return true if image format has only Y plane*/ +bool ubwcp_image_y_only(u16 format) +{ + switch (format) { + case UBWCP_NV12_Y: + case UBWCP_NV124R_Y: + case UBWCP_TP10_Y: + case UBWCP_P010_Y: + case UBWCP_P016_Y: + return true; + default: + return false; + } +} + + +/* return true if image format has only UV plane*/ +bool ubwcp_image_uv_only(u16 format) +{ + switch (format) { + case UBWCP_NV12_UV: + case UBWCP_NV124R_UV: + case 
UBWCP_TP10_UV: + case UBWCP_P010_UV: + case UBWCP_P016_UV: + return true; + default: + return false; + } +} + +/* calculate and return metadata buffer size for a given plane + * and buffer attributes + * NOTE: in this function, we will only pass in NV12 format. + * NOT NV12_Y or NV12_UV etc. + * the Y or UV information is in the "plane" + * "format" here purely means "encoding format" and no information + * if some plane data is missing. + */ +static size_t metadata_buf_sz(struct ubwcp_driver *ubwcp, + enum ubwcp_std_image_format format, + u32 width, u32 height, u8 plane) +{ + size_t size; + u64 pitch; + u64 lines; + u64 tile_width; + u32 tile_height; + + struct ubwcp_image_format_info f_info; + struct ubwcp_plane_info p_info; + + f_info = ubwcp->format_info[format]; + + DBG_BUF_ATTR(""); + DBG_BUF_ATTR(""); + DBG_BUF_ATTR("Calculating metadata buffer size: format = %d, plane = %d", format, plane); + + if (plane >= f_info.planes) { + ERR("Format does not have requested plane info: format: %d, plane: %d", + format, plane); + WARN(1, "Fix this!!!!!"); + return 0; + } + + p_info = f_info.p_info[plane]; + + /* UV plane */ + if (plane == 1) { + width = width/2; + height = height/2; + } + + tile_width = p_info.tilesize_p.width; + tile_height = p_info.tilesize_p.height; + + /* pitch: # of tiles in a row + * lines: # of tile rows + */ + pitch = UBWCP_ALIGN((width + tile_width - 1)/tile_width, META_DATA_PITCH_ALIGN); + lines = UBWCP_ALIGN((height + tile_height - 1)/tile_height, META_DATA_HEIGHT_ALIGN); + + DBG_BUF_ATTR("image params : %d x %d (pixels)", width, height); + DBG_BUF_ATTR("tile params : %d x %d (pixels)", tile_width, tile_height); + DBG_BUF_ATTR("pitch : %d (%d)", pitch, width/tile_width); + DBG_BUF_ATTR("lines : %d (%d)", lines, height); + DBG_BUF_ATTR("size (p*l*bytes) : %d", pitch*lines*1); + + /* x1 below is only to clarify that we are multiplying by 1 bytes/tile */ + size = UBWCP_ALIGN(pitch*lines*1, META_DATA_SIZE_ALIGN); + + DBG_BUF_ATTR("size (aligned 
4K): %zu (0x%zx)", size, size); + return size; +} + + +/* calculate and return size of pixel data buffer for a given plane + * and buffer attributes + */ +static size_t pixeldata_buf_sz(struct ubwcp_driver *ubwcp, + u16 format, u32 width, + u32 height, u8 plane) +{ + size_t size; + u64 pitch; + u64 lines; + u16 pixel_bytes; + u16 per_pixel; + u64 macro_tile_width_p; + u64 macro_tile_height_p; + + struct ubwcp_image_format_info f_info; + struct ubwcp_plane_info p_info; + + f_info = ubwcp->format_info[format]; + + DBG_BUF_ATTR(""); + DBG_BUF_ATTR(""); + DBG_BUF_ATTR("Calculating Pixeldata buffer size: format = %d, plane = %d", format, plane); + + if (plane >= f_info.planes) { + ERR("Format does not have requested plane info: format: %d, plane: %d", + format, plane); + WARN(1, "Fix this!!!!!"); + return 0; + } + + p_info = f_info.p_info[plane]; + + pixel_bytes = p_info.pixel_bytes; + per_pixel = p_info.per_pixel; + + /* UV plane */ + if (plane == 1) { + width = width/2; + height = height/2; + } + + macro_tile_width_p = p_info.macrotilesize_p.width; + macro_tile_height_p = p_info.macrotilesize_p.height; + + /* align pixel width and height macro tile width and height */ + pitch = UBWCP_ALIGN(width, macro_tile_width_p); + lines = UBWCP_ALIGN(height, macro_tile_height_p); + + DBG_BUF_ATTR("image params : %d x %d (pixels)", width, height); + DBG_BUF_ATTR("macro tile params: %d x %d (pixels)", macro_tile_width_p, + macro_tile_height_p); + DBG_BUF_ATTR("bytes_per_pixel : %d/%d", pixel_bytes, per_pixel); + DBG_BUF_ATTR("pitch : %d", pitch); + DBG_BUF_ATTR("lines : %d", lines); + DBG_BUF_ATTR("size (p*l*bytes) : %d", (pitch*lines*pixel_bytes)/per_pixel); + + size = UBWCP_ALIGN((pitch*lines*pixel_bytes)/per_pixel, PIXEL_DATA_SIZE_ALIGN); + + DBG_BUF_ATTR("size (aligned 4K): %zu (0x%zx)", size, size); + + return size; +} + +/* + * plane: must be 0 or 1 (1st plane == 0, 2nd plane == 1) + */ +static size_t ubwcp_ula_size(struct ubwcp_driver *ubwcp, u16 format, + u32 stride_b, u32 
scanlines, u8 plane) +{ + size_t size; + + DBG_BUF_ATTR("%s(format = %d, plane = %d)", __func__, format, plane); + /* UV plane */ + if (plane == 1) + scanlines = scanlines/2; + size = stride_b*scanlines; + DBG_BUF_ATTR("Size of plane-%u: (%u * %u) = %zu (0x%zx)", + plane, stride_b, scanlines, size, size); + return size; +} + +int missing_plane_from_format(u16 ioctl_image_format) +{ + int missing_plane; + + switch (ioctl_image_format) { + case UBWCP_NV12_Y: + missing_plane = 2; + break; + case UBWCP_NV12_UV: + missing_plane = 1; + break; + case UBWCP_NV124R_Y: + missing_plane = 2; + break; + case UBWCP_NV124R_UV: + missing_plane = 1; + break; + case UBWCP_TP10_Y: + missing_plane = 2; + break; + case UBWCP_TP10_UV: + missing_plane = 1; + break; + case UBWCP_P010_Y: + missing_plane = 2; + break; + case UBWCP_P010_UV: + missing_plane = 1; + break; + case UBWCP_P016_Y: + missing_plane = 2; + break; + case UBWCP_P016_UV: + missing_plane = 1; + break; + default: + missing_plane = 0; + } + return missing_plane; +} + +int planes_in_format(enum ubwcp_std_image_format format) +{ + if (format == RGBA) + return 1; + else + return 2; +} + +enum ubwcp_std_image_format to_std_format(u16 ioctl_image_format) +{ + switch (ioctl_image_format) { + case UBWCP_RGBA8888: + return RGBA; + case UBWCP_NV12: + case UBWCP_NV12_Y: + case UBWCP_NV12_UV: + return NV12; + case UBWCP_NV124R: + case UBWCP_NV124R_Y: + case UBWCP_NV124R_UV: + return NV124R; + case UBWCP_TP10: + case UBWCP_TP10_Y: + case UBWCP_TP10_UV: + return TP10; + case UBWCP_P010: + case UBWCP_P010_Y: + case UBWCP_P010_UV: + return P010; + case UBWCP_P016: + case UBWCP_P016_Y: + case UBWCP_P016_UV: + return P016; + default: + WARN(1, "Fix this!!!"); + return STD_IMAGE_FORMAT_INVALID; + } +} + +unsigned int ubwcp_get_hw_image_format_value(u16 ioctl_image_format) +{ + enum ubwcp_std_image_format format; + + format = to_std_format(ioctl_image_format); + switch (format) { + case RGBA: + return HW_BUFFER_FORMAT_RGBA; + case NV12: + 
return HW_BUFFER_FORMAT_NV12; + case NV124R: + return HW_BUFFER_FORMAT_NV124R; + case P010: + return HW_BUFFER_FORMAT_P010; + case TP10: + return HW_BUFFER_FORMAT_TP10; + case P016: + return HW_BUFFER_FORMAT_P016; + default: + WARN(1, "Fix this!!!!!"); + return 0; + } +} + +/* calculate ULA buffer parms + * TBD: how do we make sure uv_start address (not the offset) + * is aligned per requirement: cache line + */ +static int ubwcp_calc_ula_params(struct ubwcp_driver *ubwcp, + struct ubwcp_buffer_attrs *attr, + size_t *ula_size, + size_t *uv_start_offset) +{ + size_t size; + enum ubwcp_std_image_format format; + int planes; + int missing_plane; + u32 stride; + u32 scanlines; + u32 planar_padding; + + stride = attr->stride; + scanlines = attr->scanlines; + planar_padding = attr->planar_padding; + + /* convert ioctl image format to standard image format */ + format = to_std_format(attr->image_format); + + + /* Number of "expected" planes in "the standard defined" image format */ + planes = planes_in_format(format); + + /* any plane missing? 
+ * valid missing_plane values: + * 0 == no plane missing + * 1 == 1st plane missing + * 2 == 2nd plane missing + */ + missing_plane = missing_plane_from_format(attr->image_format); + + DBG_BUF_ATTR("ioctl_image_format : %d, std_format: %d", attr->image_format, format); + DBG_BUF_ATTR("planes_in_format : %d", planes); + DBG_BUF_ATTR("missing_plane : %d", missing_plane); + DBG_BUF_ATTR("Planar Padding : %d", planar_padding); + + if (planes == 1) { + /* uv_start beyond ULA range */ + size = ubwcp_ula_size(ubwcp, format, stride, scanlines, 0); + *uv_start_offset = size; + } else { + if (!missing_plane) { + /* size for both planes and padding */ + size = ubwcp_ula_size(ubwcp, format, stride, scanlines, 0); + size += planar_padding; + *uv_start_offset = size; + size += ubwcp_ula_size(ubwcp, format, stride, scanlines, 1); + } else { + if (missing_plane == 2) { + /* Y-only image, set uv_start beyond ULA range */ + size = ubwcp_ula_size(ubwcp, format, stride, scanlines, 0); + *uv_start_offset = size; + } else { + /* first plane data is not there */ + size = ubwcp_ula_size(ubwcp, format, stride, scanlines, 1); + *uv_start_offset = 0; /* uv data is at the beginning */ + } + } + } + + //TBD: cleanup + *ula_size = size; + DBG_BUF_ATTR("Before page align: Total ULA_Size: %d (0x%x) (planes + planar padding)", + *ula_size, *ula_size); + *ula_size = UBWCP_ALIGN(size, 4096); + DBG_BUF_ATTR("After page align : Total ULA_Size: %d (0x%x) (planes + planar padding)", + *ula_size, *ula_size); + return 0; +} + + +/* calculate UBWCP buffer parms */ +static int ubwcp_calc_ubwcp_buf_params(struct ubwcp_driver *ubwcp, + struct ubwcp_buffer_attrs *attr, + size_t *md_p0, size_t *pd_p0, + size_t *md_p1, size_t *pd_p1, + size_t *stride_tp10_b) +{ + int planes; + int missing_plane; + enum ubwcp_std_image_format format; + size_t stride_tp10_p; + + FENTRY(); + + /* convert ioctl image format to standard image format */ + format = to_std_format(attr->image_format); + missing_plane = 
missing_plane_from_format(attr->image_format); + planes = planes_in_format(format); //pass in 0 (RGB) should return 1 + + DBG_BUF_ATTR("ioctl_image_format : %d, std_format: %d", attr->image_format, format); + DBG_BUF_ATTR("planes_in_format : %d", planes); + DBG_BUF_ATTR("missing_plane : %d", missing_plane); + + if (!missing_plane) { + *md_p0 = metadata_buf_sz(ubwcp, format, attr->width, attr->height, 0); + *pd_p0 = pixeldata_buf_sz(ubwcp, format, attr->width, attr->height, 0); + if (planes == 2) { + *md_p1 = metadata_buf_sz(ubwcp, format, attr->width, attr->height, 1); + *pd_p1 = pixeldata_buf_sz(ubwcp, format, attr->width, attr->height, 1); + } + } else { + if (missing_plane == 1) { + *md_p0 = 0; + *pd_p0 = 0; + *md_p1 = metadata_buf_sz(ubwcp, format, attr->width, attr->height, 1); + *pd_p1 = pixeldata_buf_sz(ubwcp, format, attr->width, attr->height, 1); + } else { + *md_p0 = metadata_buf_sz(ubwcp, format, attr->width, attr->height, 0); + *pd_p0 = pixeldata_buf_sz(ubwcp, format, attr->width, attr->height, 0); + *md_p1 = 0; + *pd_p1 = 0; + } + } + + if (format == TP10) { + stride_tp10_p = UBWCP_ALIGN(attr->width, 192); + *stride_tp10_b = (stride_tp10_p/3) + stride_tp10_p; + } else { + *stride_tp10_b = 0; + } + + return 0; +} + + +/* reserve ULA address space of the given size */ +static phys_addr_t ubwcp_ula_alloc(struct ubwcp_driver *ubwcp, size_t size) +{ + phys_addr_t pa; + + mutex_lock(&ubwcp->ula_lock); + pa = gen_pool_alloc(ubwcp->ula_pool, size); + DBG("addr: %p, size: %zx", pa, size); + mutex_unlock(&ubwcp->ula_lock); + return pa; +} + + +/* free ULA address space of the given address and size */ +static void ubwcp_ula_free(struct ubwcp_driver *ubwcp, phys_addr_t pa, size_t size) +{ + mutex_lock(&ubwcp->ula_lock); + if (!gen_pool_has_addr(ubwcp->ula_pool, pa, size)) { + ERR("Attempt to free mem not from gen_pool: pa: %p, size: %zx", pa, size); + goto err; + } + DBG("addr: %p, size: %zx", pa, size); + gen_pool_free(ubwcp->ula_pool, pa, size); + 
mutex_unlock(&ubwcp->ula_lock); + return; + +err: + mutex_unlock(&ubwcp->ula_lock); +} + + +/* free up or expand current_pa and return the new pa */ +static phys_addr_t ubwcp_ula_realloc(struct ubwcp_driver *ubwcp, + phys_addr_t pa, + size_t size, + size_t new_size) +{ + if (size == new_size) + return pa; + + if (pa) + ubwcp_ula_free(ubwcp, pa, size); + + return ubwcp_ula_alloc(ubwcp, new_size); +} + + +/* unmap dma buf */ +static void ubwcp_dma_unmap(struct ubwcp_buf *buf) +{ + FENTRY(); + if (buf->dma_buf && buf->attachment) { + DBG("Calling dma_buf_unmap_attachment()"); + dma_buf_unmap_attachment(buf->attachment, buf->sgt, DMA_BIDIRECTIONAL); + buf->sgt = NULL; + dma_buf_detach(buf->dma_buf, buf->attachment); + buf->attachment = NULL; + } +} + + +/* dma map ubwcp buffer */ +static int ubwcp_dma_map(struct ubwcp_buf *buf, + struct device *dev, + size_t iova_min_size, + dma_addr_t *iova) +{ + int ret = 0; + struct dma_buf *dma_buf = buf->dma_buf; + struct dma_buf_attachment *attachment; + struct sg_table *sgt; + size_t dma_len; + + /* Map buffer to SMMU and get IOVA */ + attachment = dma_buf_attach(dma_buf, dev); + if (IS_ERR(attachment)) { + ret = PTR_ERR(attachment); + ERR("dma_buf_attach() failed: %d", ret); + goto err; + } + + dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); + dma_set_seg_boundary(dev, (unsigned long)DMA_BIT_MASK(64)); + + sgt = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL); + if (IS_ERR_OR_NULL(sgt)) { + ret = PTR_ERR(sgt); + ERR("dma_buf_map_attachment() failed: %d", ret); + goto err_detach; + } + + if (sgt->nents != 1) { + ERR("nents = %d", sgt->nents); + goto err_unmap; + } + + /* ensure that dma_buf is big enough for the new attrs */ + dma_len = sg_dma_len(sgt->sgl); + if (dma_len < iova_min_size) { + ERR("dma len: %d is less than min ubwcp buffer size: %d", + dma_len, iova_min_size); + goto err_unmap; + } + + *iova = sg_dma_address(sgt->sgl); + buf->attachment = attachment; + buf->sgt = sgt; + return ret; + +err_unmap: + 
dma_buf_unmap_attachment(attachment, sgt, DMA_BIDIRECTIONAL); +err_detach: + dma_buf_detach(dma_buf, attachment); +err: + if (!ret) + ret = -1; + return ret; +} + +static void +ubwcp_pixel_to_bytes(struct ubwcp_driver *ubwcp, + enum ubwcp_std_image_format format, + u32 width_p, u32 height_p, + u32 *width_b, u32 *height_b) +{ + u16 pixel_bytes; + u16 per_pixel; + struct ubwcp_image_format_info f_info; + struct ubwcp_plane_info p_info; + + f_info = ubwcp->format_info[format]; + p_info = f_info.p_info[0]; + + pixel_bytes = p_info.pixel_bytes; + per_pixel = p_info.per_pixel; + + *width_b = (width_p*pixel_bytes)/per_pixel; + *height_b = (height_p*pixel_bytes)/per_pixel; +} + +static void reset_buf_attrs(struct ubwcp_buf *buf) +{ + struct ubwcp_hw_meta_metadata *mmdata; + struct ubwcp_driver *ubwcp; + + ubwcp = buf->ubwcp; + mmdata = &buf->mmdata; + + ubwcp_dma_unmap(buf); + + /* reset ula params */ + if (buf->ula_size) { + ubwcp_ula_free(ubwcp, buf->ula_pa, buf->ula_size); + buf->ula_size = 0; + buf->ula_pa = 0; + } + /* reset ubwcp params */ + memset(mmdata, 0, sizeof(*mmdata)); + buf->buf_attr_set = false; +} + +static void print_mmdata_desc(struct ubwcp_hw_meta_metadata *mmdata) +{ + DBG_BUF_ATTR(""); + DBG_BUF_ATTR("--------MM_DATA DESC ---------"); + DBG_BUF_ATTR("uv_start_addr : 0x%08llx (cache addr) (actual: 0x%llx)", + mmdata->uv_start_addr, mmdata->uv_start_addr << 6); + DBG_BUF_ATTR("format : 0x%08x", mmdata->format); + DBG_BUF_ATTR("stride : 0x%08x (cache addr) (actual: 0x%x)", + mmdata->stride, mmdata->stride << 6); + DBG_BUF_ATTR("stride_ubwcp : 0x%08x (cache addr) (actual: 0x%zx)", + mmdata->stride_ubwcp, mmdata->stride_ubwcp << 6); + DBG_BUF_ATTR("metadata_base_y : 0x%08x (page addr) (actual: 0x%llx)", + mmdata->metadata_base_y, mmdata->metadata_base_y << 12); + DBG_BUF_ATTR("metadata_base_uv: 0x%08x (page addr) (actual: 0x%zx)", + mmdata->metadata_base_uv, mmdata->metadata_base_uv << 12); + DBG_BUF_ATTR("buffer_y_offset : 0x%08x (page addr) (actual: 
0x%zx)", + mmdata->buffer_y_offset, mmdata->buffer_y_offset << 12); + DBG_BUF_ATTR("buffer_uv_offset: 0x%08x (page addr) (actual: 0x%zx)", + mmdata->buffer_uv_offset, mmdata->buffer_uv_offset << 12); + DBG_BUF_ATTR("width_height : 0x%08x (width: 0x%x height: 0x%x)", + mmdata->width_height, mmdata->width_height >> 16, mmdata->width_height & 0xFFFF); + DBG_BUF_ATTR(""); +} + +/* set buffer attributes: + * Failure: + * If a call to ubwcp_set_buf_attrs() fails, any attributes set from a previously + * successful ubwcp_set_buf_attrs() will be also removed. Thus, + * ubwcp_set_buf_attrs() implicitly does "unset previous attributes" and + * then "try to set these new attributes". + * + * The result of a failed call to ubwcp_set_buf_attrs() will leave the buffer + * in a linear mode, NOT with attributes from earlier successful call. + */ +int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) +{ + int ret = 0; + size_t ula_size = 0; + size_t uv_start_offset = 0; + phys_addr_t ula_pa = 0x0; + struct ubwcp_buf *buf; + struct ubwcp_driver *ubwcp; + + size_t metadata_p0; + size_t pixeldata_p0; + size_t metadata_p1; + size_t pixeldata_p1; + size_t iova_min_size; + size_t stride_tp10_b; + dma_addr_t iova_base; + struct ubwcp_hw_meta_metadata *mmdata; + u64 uv_start; + u32 stride_b; + u32 width_b; + u32 height_b; + enum ubwcp_std_image_format std_image_format; + + FENTRY(); + + if (!dmabuf) { + ERR("NULL dmabuf input ptr"); + return -EINVAL; + } + + if (!attr) { + ERR("NULL attr ptr"); + return -EINVAL; + } + + buf = dma_buf_to_ubwcp_buf(dmabuf); + if (!buf) { + ERR("No corresponding ubwcp_buf for the passed in dma_buf"); + return -EINVAL; + } + + mutex_lock(&buf->lock); + + if (buf->locked) { + ERR("Cannot set attr when buffer is locked"); + ret = -EBUSY; + goto err; + } + + ubwcp = buf->ubwcp; + mmdata = &buf->mmdata; + + //TBD: now that we have single exit point for all errors, + //we can limit this call to error only? 
+ //also see if this can be part of reset_buf_attrs() + DBG_BUF_ATTR("resetting mmap to linear"); + /* remove any earlier dma buf mmap configuration */ + ret = ubwcp->mmap_config_fptr(buf->dma_buf, true, 0, 0); + if (ret) { + ERR("dma_buf_mmap_config() failed: %d", ret); + goto err; + } + + if (!ubwcp_buf_attrs_valid(attr)) { + ERR("Invalid buf attrs"); + goto err; + } + + DBG_BUF_ATTR("valid buf attrs"); + + if (attr->image_format == UBWCP_LINEAR) { + DBG_BUF_ATTR("Linear format requested"); + /* linear format request with permanent range xlation doesn't + * make sense. need to define behavior if this happens. + * note: with perm set, desc is allocated to this buffer. + */ + //TBD: UBWCP_ASSERT(!buf->perm); + + if (buf->buf_attr_set) + reset_buf_attrs(buf); + + mutex_unlock(&buf->lock); + return 0; + } + + std_image_format = to_std_format(attr->image_format); + if (std_image_format == STD_IMAGE_FORMAT_INVALID) { + ERR("Unable to map ioctl image format to std image format"); + goto err; + } + + /* Calculate uncompressed-buffer size. 
*/ + DBG_BUF_ATTR(""); + DBG_BUF_ATTR(""); + DBG_BUF_ATTR("Calculating ula params -->"); + ret = ubwcp_calc_ula_params(ubwcp, attr, &ula_size, &uv_start_offset); + if (ret) { + ERR("ubwcp_calc_ula_params() failed: %d", ret); + goto err; + } + + DBG_BUF_ATTR(""); + DBG_BUF_ATTR(""); + DBG_BUF_ATTR("Calculating ubwcp params -->"); + ret = ubwcp_calc_ubwcp_buf_params(ubwcp, attr, + &metadata_p0, &pixeldata_p0, + &metadata_p1, &pixeldata_p1, + &stride_tp10_b); + if (ret) { + ERR("ubwcp_calc_buf_params() failed: %d", ret); + goto err; + } + + iova_min_size = metadata_p0 + pixeldata_p0 + metadata_p1 + pixeldata_p1; + + DBG_BUF_ATTR(""); + DBG_BUF_ATTR(""); + DBG_BUF_ATTR("------Summary ULA Calculated Params ------"); + DBG_BUF_ATTR("ULA Size : %8zu (0x%8zx)", ula_size, ula_size); + DBG_BUF_ATTR("UV Start Offset : %8zu (0x%8zx)", uv_start_offset, uv_start_offset); + DBG_BUF_ATTR("------Summary UBCP Calculated Params ------"); + DBG_BUF_ATTR("metadata_p0 : %8d (0x%8zx)", metadata_p0, metadata_p0); + DBG_BUF_ATTR("pixeldata_p0 : %8d (0x%8zx)", pixeldata_p0, pixeldata_p0); + DBG_BUF_ATTR("metadata_p1 : %8d (0x%8zx)", metadata_p1, metadata_p1); + DBG_BUF_ATTR("pixeldata_p1 : %8d (0x%8zx)", pixeldata_p1, pixeldata_p1); + DBG_BUF_ATTR("stride_tp10 : %8d (0x%8zx)", stride_tp10_b, stride_tp10_b); + DBG_BUF_ATTR("iova_min_size : %8d (0x%8zx)", iova_min_size, iova_min_size); + DBG_BUF_ATTR(""); + + if (buf->buf_attr_set) { + /* if buf attr were previously set, these must not be 0 */ + /* TBD: do we need this check in production code? */ + if (!buf->ula_pa) { + WARN(1, "ula_pa cannot be 0 if buf_attr_set is true!!!"); + goto err; + } + if (!buf->ula_size) { + WARN(1, "ula_size cannot be 0 if buf_attr_set is true!!!"); + goto err; + } + } + + /* assign ULA PA with uncompressed-size range */ + ula_pa = ubwcp_ula_realloc(ubwcp, buf->ula_pa, buf->ula_size, ula_size); + if (!ula_pa) { + ERR("ubwcp_ula_alloc/realloc() failed. 
running out of ULA PA space?"); + goto err; + } + + buf->ula_size = ula_size; + buf->ula_pa = ula_pa; + DBG_BUF_ATTR("Allocated ULA_PA: 0x%p of size: 0x%zx", ula_pa, ula_size); + DBG_BUF_ATTR(""); + + /* inform ULA-PA to dma-heap: needed for dma-heap to do CMOs later on */ + DBG_BUF_ATTR("Calling mmap_config(): ULA_PA: 0x%p size: 0x%zx", ula_pa, ula_size); + ret = ubwcp->mmap_config_fptr(buf->dma_buf, false, buf->ula_pa, + buf->ula_size); + if (ret) { + ERR("dma_buf_mmap_config() failed: %d", ret); + goto err; + } + + /* dma map only the first time attribute is set */ + if (!buf->buf_attr_set) { + /* linear -> ubwcp. map ubwcp buffer */ + ret = ubwcp_dma_map(buf, ubwcp->dev_buf_cb, iova_min_size, &iova_base); + if (ret) { + ERR("ubwcp_dma_map() failed: %d", ret); + goto err; + } + DBG_BUF_ATTR("dma_buf IOVA range: 0x%llx + min_size (0x%zx): 0x%llx", + iova_base, iova_min_size, iova_base + iova_min_size); + } + + uv_start = ula_pa + uv_start_offset; + if (!IS_ALIGNED(uv_start, 64)) { + ERR("ERROR: uv_start is NOT aligned to cache line"); + goto err; + } + + /* Convert height and width to bytes for writing to mmdata */ + if (std_image_format != TP10) { + ubwcp_pixel_to_bytes(ubwcp, std_image_format, attr->width, + attr->height, &width_b, &height_b); + } else { + /* for tp10 image compression, we need to program p010 width/height */ + ubwcp_pixel_to_bytes(ubwcp, P010, attr->width, + attr->height, &width_b, &height_b); + } + + stride_b = attr->stride; + + /* create the mmdata descriptor */ + memset(mmdata, 0, sizeof(*mmdata)); + mmdata->uv_start_addr = CACHE_ADDR(uv_start); + mmdata->format = ubwcp_get_hw_image_format_value(attr->image_format); + + if (std_image_format != TP10) { + mmdata->stride = CACHE_ADDR(stride_b); /* uncompressed stride */ + } else { + mmdata->stride = CACHE_ADDR(stride_tp10_b); /* compressed stride */ + mmdata->stride_ubwcp = CACHE_ADDR(stride_b); /* uncompressed stride */ + } + + mmdata->metadata_base_y = PAGE_ADDR(iova_base); + 
mmdata->metadata_base_uv = PAGE_ADDR(iova_base + metadata_p0 + pixeldata_p0); + mmdata->buffer_y_offset = PAGE_ADDR(metadata_p0); + mmdata->buffer_uv_offset = PAGE_ADDR(metadata_p1); + mmdata->width_height = width_b << 16 | height_b; + + print_mmdata_desc(mmdata); + + buf->buf_attr = *attr; + buf->buf_attr_set = true; + //TBD: UBWCP_ASSERT(!buf->perm); + mutex_unlock(&buf->lock); + return 0; + +err: + reset_buf_attrs(buf); + mutex_unlock(&buf->lock); + if (!ret) + ret = -1; + return ret; +} +EXPORT_SYMBOL(ubwcp_set_buf_attrs); + + +/* Set buffer attributes ioctl */ +static int ubwcp_set_buf_attrs_ioctl(struct ubwcp_ioctl_buffer_attrs *attr_ioctl) +{ + struct dma_buf *dmabuf; + + dmabuf = ubwcp_dma_buf_fd_to_dma_buf(attr_ioctl->fd); + + return ubwcp_set_buf_attrs(dmabuf, &attr_ioctl->attr); +} + + +/* Free up the buffer descriptor */ +static void ubwcp_buf_desc_free(struct ubwcp_driver *ubwcp, struct ubwcp_desc *desc) +{ + int idx = desc->idx; + struct ubwcp_desc *desc_list = ubwcp->desc_list; + + mutex_lock(&ubwcp->desc_lock); + desc_list[idx].idx = -1; + desc_list[idx].ptr = NULL; + DBG("freed descriptor_id: %d", idx); + mutex_unlock(&ubwcp->desc_lock); +} + + +/* Allocate next available buffer descriptor. 
*/ +static struct ubwcp_desc *ubwcp_buf_desc_allocate(struct ubwcp_driver *ubwcp) +{ + int idx; + struct ubwcp_desc *desc_list = ubwcp->desc_list; + + mutex_lock(&ubwcp->desc_lock); + for (idx = 0; idx < UBWCP_BUFFER_DESC_COUNT; idx++) { + if (desc_list[idx].idx == -1) { + desc_list[idx].idx = idx; + desc_list[idx].ptr = ubwcp->buffer_desc_base + + idx*UBWCP_BUFFER_DESC_OFFSET; + DBG("allocated descriptor_id: %d", idx); + mutex_unlock(&ubwcp->desc_lock); + return &desc_list[idx]; + } + } + mutex_unlock(&ubwcp->desc_lock); + return NULL; +} + +#define FLUSH_WA_SIZE 64 +#define FLUSH_WA_UDELAY 89 +void ubwcp_flush_cache_wa(struct device *dev, phys_addr_t paddr, size_t size) +{ + phys_addr_t cline = paddr; + int num_line = size / FLUSH_WA_SIZE; + int i; + + for (i = 0; i < num_line; i++) { + dma_sync_single_for_cpu(dev, cline, FLUSH_WA_SIZE, 0); + udelay(FLUSH_WA_UDELAY); + cline += FLUSH_WA_SIZE; + } +} + +/** + * Lock buffer for CPU access. This prepares ubwcp hw to allow + * CPU access to the compressed buffer. It will perform + * necessary address translation configuration and cache maintenance ops + * so that CPU can safely access ubwcp buffer, if this call is + * successful. 
+ * Allocate descriptor if not already, + * perform CMO and then enable range check + * + * @param dmabuf : ptr to the dma buf + * @param direction : direction of access + * + * @return int : 0 on success, otherwise error code + */ +static int ubwcp_lock(struct dma_buf *dmabuf, enum dma_data_direction dir) +{ + int ret = 0; + struct ubwcp_buf *buf; + struct ubwcp_driver *ubwcp; + + FENTRY(); + + if (!dmabuf) { + ERR("NULL dmabuf input ptr"); + return -EINVAL; + } + + if (!valid_dma_direction(dir)) { + ERR("invalid direction: %d", dir); + return -EINVAL; + } + + buf = dma_buf_to_ubwcp_buf(dmabuf); + if (!buf) { + ERR("ubwcp_buf ptr not found"); + return -1; + } + + mutex_lock(&buf->lock); + + if (!buf->buf_attr_set) { + ERR("lock() called on buffer, but attr not set"); + goto err; + } + + if (buf->buf_attr.image_format == UBWCP_LINEAR) { + ERR("lock() called on linear buffer"); + goto err; + } + + if (!buf->locked) { + DBG("first lock on buffer"); + ubwcp = buf->ubwcp; + + /* buf->desc could already be allocated because of perm range xlation */ + if (!buf->desc) { + /* allocate a buffer descriptor */ + buf->desc = ubwcp_buf_desc_allocate(buf->ubwcp); + if (!buf->desc) { + ERR("ubwcp_allocate_buf_desc() failed"); + goto err; + } + + memcpy(buf->desc->ptr, &buf->mmdata, sizeof(buf->mmdata)); + + /* Flushing of updated mmdata: + * mmdata is iocoherent and ubwcp will get it from CPU cache - + * *as long as* it has not cached that itself during previous + * access to the same descriptor. + * + * During unlock of previous use of this descriptor, + * we do hw flush, which will get rid of this mmdata from + * ubwcp cache. + * + * In addition, we also do a hw flush after enable_range_ck(). + * That will also get rid of any speculative fetch of mmdata + * by the ubwcp hw. At this time, the assumption is that ubwcp + * will cache mmdata only for active descriptor. 
But if ubwcp + * is speculatively fetching mmdata for all descriptors + * (irrespetive of enabled or not), the flush during lock + * will be necessary to make sure ubwcp sees updated mmdata + * that we just updated + */ + + /* program ULA range for this buffer */ + DBG("setting range check: descriptor_id: %d, addr: %p, size: %zx", + buf->desc->idx, buf->ula_pa, buf->ula_size); + ubwcp_hw_set_range_check(ubwcp->base, buf->desc->idx, buf->ula_pa, + buf->ula_size); + } + + + /* enable range check */ + DBG("enabling range check, descriptor_id: %d", buf->desc->idx); + mutex_lock(&ubwcp->hw_range_ck_lock); + ubwcp_hw_enable_range_check(ubwcp->base, buf->desc->idx); + mutex_unlock(&ubwcp->hw_range_ck_lock); + + /* Flush/invalidate UBWCP caches */ + /* Why: cpu could have done a speculative fetch before + * enable_range_ck() and ubwcp in process of returning "default" data + * we don't want that stashing of default data pending. + * we force completion of that and then we also cpu invalidate which + * will get rid of that line. + */ + ubwcp_flush(ubwcp); + + /* Flush/invalidate ULA PA from CPU caches + * TBD: if (dir == READ or BIDIRECTION) //NOT for write + * -- Confirm with Chris if this can be skipped for write + */ + dma_sync_single_for_cpu(ubwcp->dev, buf->ula_pa, buf->ula_size, dir); + buf->lock_dir = dir; + buf->locked = true; + } else { + DBG("buf already locked"); + /* TBD: what if new buffer direction is not same as previous? + * must update the dir. + */ + } + buf->lock_count++; + DBG("new lock_count: %d", buf->lock_count); + mutex_unlock(&buf->lock); + return ret; + +err: + mutex_unlock(&buf->lock); + if (!ret) + ret = -1; + return ret; +} + +/* This can be called as a result of external unlock() call or + * internally if free() is called without unlock(). + * It can fail only for 1 reason: ubwcp_flush fails. currently we are ignoring the flush failure + * because it is hardware failure and no recovery path is defined. 
+ */ +static int unlock_internal(struct ubwcp_buf *buf, enum dma_data_direction dir, bool free_buffer) +{ + struct ubwcp_driver *ubwcp; + + DBG("current lock_count: %d", buf->lock_count); + if (free_buffer) { + buf->lock_count = 0; + DBG("Forced lock_count: %d", buf->lock_count); + } else { + buf->lock_count--; + DBG("new lock_count: %d", buf->lock_count); + if (buf->lock_count) { + DBG("more than 1 lock on buffer. waiting until last unlock"); + return 0; + } + } + + ubwcp = buf->ubwcp; + + /* Flush/invalidate ULA PA from CPU caches */ + //TBD: if (dir == WRITE or BIDIRECTION) + //dma_sync_single_for_device(ubwcp->dev, buf->ula_pa, buf->ula_size, dir); + /* TODO: Use flush work around, remove when no longer needed */ + ubwcp_flush_cache_wa(ubwcp->dev, buf->ula_pa, buf->ula_size); + + /* TBD: confirm with HW if this should be done before or + * after disable_range_ck() + */ + ubwcp_flush(ubwcp); + + /* disable range check */ + DBG("disabling range check"); + mutex_lock(&ubwcp->hw_range_ck_lock); + ubwcp_hw_disable_range_check(ubwcp->base, buf->desc->idx); + mutex_unlock(&ubwcp->hw_range_ck_lock); + + /* release descriptor if perm range xlation is not set */ + if (!buf->perm) { + ubwcp_buf_desc_free(buf->ubwcp, buf->desc); + buf->desc = NULL; + } + buf->locked = false; + return 0; +} + + +/** + * Unlock buffer from CPU access. This prepares ubwcp hw to + * safely allow for device access to the compressed buffer including any + * necessary cache maintenance ops. It may also free up certain ubwcp + * resources that could result in error when accessed by CPU in + * unlocked state. 
+ * + * @param dmabuf : ptr to the dma buf + * @param direction : direction of access + * + * @return int : 0 on success, otherwise error code + */ +static int ubwcp_unlock(struct dma_buf *dmabuf, enum dma_data_direction dir) +{ + struct ubwcp_buf *buf; + int ret; + + FENTRY(); + + if (!dmabuf) { + ERR("NULL dmabuf input ptr"); + return -EINVAL; + } + + if (!valid_dma_direction(dir)) { + ERR("invalid direction: %d", dir); + return -EINVAL; + } + + buf = dma_buf_to_ubwcp_buf(dmabuf); + if (!buf) { + ERR("ubwcp_buf not found"); + return -1; + } + + if (!buf->locked) { + ERR("unlock() called on buffer which not in locked state"); + return -1; + } + + error_print_count = 0; + mutex_lock(&buf->lock); + ret = unlock_internal(buf, dir, false); + mutex_unlock(&buf->lock); + return ret; +} + + +/* Return buffer attributes for the given buffer */ +int ubwcp_get_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) +{ + int ret = 0; + struct ubwcp_buf *buf; + + FENTRY(); + + if (!dmabuf) { + ERR("NULL dmabuf input ptr"); + return -EINVAL; + } + + if (!attr) { + ERR("NULL attr ptr"); + return -EINVAL; + } + + buf = dma_buf_to_ubwcp_buf(dmabuf); + if (!buf) { + ERR("ubwcp_buf ptr not found"); + return -1; + } + + mutex_lock(&buf->lock); + if (!buf->buf_attr_set) { + ERR("buffer attributes not set"); + mutex_unlock(&buf->lock); + return -1; + } + + *attr = buf->buf_attr; + + mutex_unlock(&buf->lock); + return ret; +} +EXPORT_SYMBOL(ubwcp_get_buf_attrs); + + +/* Set permanent range translation. + * enable: Descriptor will be reserved for this buffer until disabled, + * making lock/unlock quicker. + * disable: Descriptor will not be reserved for this buffer. Instead, + * descriptor will be allocated and released for each lock/unlock. + * If currently allocated but not being used, descriptor will be + * released. 
+ */ +int ubwcp_set_perm_range_translation(struct dma_buf *dmabuf, bool enable) +{ + int ret = 0; + struct ubwcp_buf *buf; + + FENTRY(); + + if (!dmabuf) { + ERR("NULL dmabuf input ptr"); + return -EINVAL; + } + + buf = dma_buf_to_ubwcp_buf(dmabuf); + if (!buf) { + ERR("ubwcp_buf not found"); + return -1; + } + + /* not implemented */ + if (1) { + ERR("API not implemented yet"); + return -1; + } + + /* TBD: make sure we acquire buf lock while setting this so there is + * no race condition with attr_set/lock/unlock + */ + buf->perm = enable; + + /* if "disable" and we have allocated a desc and it is not being + * used currently, release it + */ + if (!enable && buf->desc && !buf->locked) { + ubwcp_buf_desc_free(buf->ubwcp, buf->desc); + buf->desc = NULL; + + /* Flush/invalidate UBWCP caches */ + //TBD: need to do anything? + } + + return ret; +} +EXPORT_SYMBOL(ubwcp_set_perm_range_translation); + +/** + * Free up ubwcp resources for this buffer. + * + * @param dmabuf : ptr to the dma buf + * + * @return int : 0 on success, otherwise error code + */ +static int ubwcp_free_buffer(struct dma_buf *dmabuf) +{ + int ret = 0; + struct ubwcp_buf *buf; + struct ubwcp_driver *ubwcp; + + FENTRY(); + + if (!dmabuf) { + ERR("NULL dmabuf input ptr"); + return -EINVAL; + } + + buf = dma_buf_to_ubwcp_buf(dmabuf); + if (!buf) { + ERR("ubwcp_buf ptr not found"); + return -1; + } + + mutex_lock(&buf->lock); + ubwcp = buf->ubwcp; + + if (buf->locked) { + DBG("free() called without unlock. unlock()'ing first..."); + ret = unlock_internal(buf, buf->lock_dir, true); + if (ret) + ERR("unlock_internal(): failed : %d, but continuing free()", ret); + } + + /* if we are still holding a desc, release it. 
this can happen only if perm == true */ + if (buf->desc) { + WARN_ON(!buf->perm); /* TBD: change to BUG() later...*/ + ubwcp_buf_desc_free(buf->ubwcp, buf->desc); + buf->desc = NULL; + } + + if (buf->buf_attr_set) + reset_buf_attrs(buf); + + mutex_lock(&ubwcp->buf_table_lock); + hash_del(&buf->hnode); + kfree(buf); + + /* If this is the last buffer being freed, power off ubwcp */ + if (hash_empty(ubwcp->buf_table)) { + DBG("last buffer: ~~~~~~~~~~~"); + /* TBD: If everything is working fine, ubwcp_flush() should not + * be needed here. Each buffer free logic should be taking + * care of flush. Just a note for now. Might need to add the + * flush here for debug purpose. + */ + DBG("Calling remove_memory() for ULA PA pool"); + ret = remove_memory(ubwcp->ula_pool_base, ubwcp->ula_pool_size); + if (ret) { + ERR("remove_memory failed st:0x%lx sz:0x%lx err: %d", + ubwcp->ula_pool_base, + ubwcp->ula_pool_size, ret); + goto err_remove_mem; + } else { + DBG("DONE: calling remove_memory() for ULA PA pool"); + } + DBG("Calling power OFF ..."); + ubwcp_power(ubwcp, false); + } + mutex_unlock(&ubwcp->buf_table_lock); + return ret; + +err_remove_mem: + mutex_unlock(&ubwcp->buf_table_lock); + if (!ret) + ret = -1; + DBG("returning error: %d", ret); + return ret; +} + + +/* file open: TBD: increment ref count? */ +static int ubwcp_open(struct inode *i, struct file *f) +{ + return 0; +} + + +/* file open: TBD: decrement ref count? 
*/ +static int ubwcp_close(struct inode *i, struct file *f) +{ + return 0; +} + + +/* handle IOCTLs */ +static long ubwcp_ioctl(struct file *file, unsigned int ioctl_num, unsigned long ioctl_param) +{ + struct ubwcp_ioctl_buffer_attrs buf_attr_ioctl; + struct ubwcp_ioctl_hw_version hw_ver; + + switch (ioctl_num) { + case UBWCP_IOCTL_SET_BUF_ATTR: + if (copy_from_user(&buf_attr_ioctl, (const void __user *) ioctl_param, + sizeof(buf_attr_ioctl))) { + ERR("ERROR: copy_from_user() failed"); + return -EFAULT; + } + DBG("IOCTL : SET_BUF_ATTR: fd = %d", buf_attr_ioctl.fd); + return ubwcp_set_buf_attrs_ioctl(&buf_attr_ioctl); + + case UBWCP_IOCTL_GET_HW_VER: + DBG("IOCTL : GET_HW_VER"); + ubwcp_get_hw_version(&hw_ver); + if (copy_to_user((void __user *)ioctl_param, &hw_ver, sizeof(hw_ver))) { + ERR("ERROR: copy_to_user() failed"); + return -EFAULT; + } + break; + + default: + ERR("Invalid ioctl_num = %d", ioctl_num); + return -EINVAL; + } + return 0; +} + + +static const struct file_operations ubwcp_fops = { + .owner = THIS_MODULE, + .open = ubwcp_open, + .release = ubwcp_close, + .unlocked_ioctl = ubwcp_ioctl, +}; + + +static int ubwcp_debugfs_init(struct ubwcp_driver *ubwcp) +{ + struct dentry *debugfs_root; + + debugfs_root = debugfs_create_dir("ubwcp", NULL); + if (!debugfs_root) { + pr_warn("Failed to create debugfs for ubwcp\n"); + return -1; + } + + debugfs_create_u32("debug_trace_enable", 0644, debugfs_root, &ubwcp_debug_trace_enable); + + ubwcp->debugfs_root = debugfs_root; + return 0; +} + +static void ubwcp_debugfs_deinit(struct ubwcp_driver *ubwcp) +{ + debugfs_remove_recursive(ubwcp->debugfs_root); +} + +/* ubwcp char device initialization */ +static int ubwcp_cdev_init(struct ubwcp_driver *ubwcp) +{ + int ret; + dev_t devt; + struct class *dev_class; + struct device *dev_sys; + + /* allocate major device number (/proc/devices -> major_num ubwcp) */ + ret = alloc_chrdev_region(&devt, 0, UBWCP_NUM_DEVICES, UBWCP_DEVICE_NAME); + if (ret) { + 
ERR("alloc_chrdev_region() failed: %d", ret); + return ret; + } + + /* create device class (/sys/class/ubwcp_class) */ + dev_class = class_create(THIS_MODULE, "ubwcp_class"); + if (IS_ERR(dev_class)) { + ERR("class_create() failed"); + return -1; + } + + /* Create device and register with sysfs + * (/sys/class/ubwcp_class/ubwcp/... -> dev/power/subsystem/uevent) + */ + dev_sys = device_create(dev_class, NULL, devt, NULL, + UBWCP_DEVICE_NAME); + if (IS_ERR(dev_sys)) { + ERR("device_create() failed"); + return -1; + } + + /* register file operations and get cdev */ + cdev_init(&ubwcp->cdev, &ubwcp_fops); + + /* associate cdev and device major/minor with file system + * can do file ops on /dev/ubwcp after this + */ + ret = cdev_add(&ubwcp->cdev, devt, 1); + if (ret) { + ERR("cdev_add() failed"); + return -1; + } + + ubwcp->devt = devt; + ubwcp->dev_class = dev_class; + ubwcp->dev_sys = dev_sys; + return 0; +} + +static void ubwcp_cdev_deinit(struct ubwcp_driver *ubwcp) +{ + device_destroy(ubwcp->dev_class, ubwcp->devt); + class_destroy(ubwcp->dev_class); + cdev_del(&ubwcp->cdev); + unregister_chrdev_region(ubwcp->devt, UBWCP_NUM_DEVICES); +} + + +#define ERR_PRINT_COUNT_MAX 21 +/* TBD: use proper rate limit for debug prints */ +irqreturn_t ubwcp_irq_handler(int irq, void *ptr) +{ + struct ubwcp_driver *ubwcp; + void __iomem *base; + u64 src; + + error_print_count++; + + ubwcp = (struct ubwcp_driver *) ptr; + base = ubwcp->base; + + if (irq == ubwcp->irq_range_ck_rd) { + if (error_print_count < ERR_PRINT_COUNT_MAX) { + src = ubwcp_hw_interrupt_src_address(base, 0); + ERR("check range read error: src: 0x%llx", src << 6); + } + ubwcp_hw_interrupt_clear(ubwcp->base, 0); + } else if (irq == ubwcp->irq_range_ck_wr) { + if (error_print_count < ERR_PRINT_COUNT_MAX) { + src = ubwcp_hw_interrupt_src_address(base, 1); + ERR("check range write error: src: 0x%llx", src << 6); + } + ubwcp_hw_interrupt_clear(ubwcp->base, 1); + } else if (irq == ubwcp->irq_encode) { + if 
(error_print_count < ERR_PRINT_COUNT_MAX) { + src = ubwcp_hw_interrupt_src_address(base, 3); + ERR("encode error: src: 0x%llx", src << 6); + } + ubwcp_hw_interrupt_clear(ubwcp->base, 3); //TBD: encode is bit-3 instead of bit-2 + } else if (irq == ubwcp->irq_decode) { + if (error_print_count < ERR_PRINT_COUNT_MAX) { + src = ubwcp_hw_interrupt_src_address(base, 2); + ERR("decode error: src: 0x%llx", src << 6); + } + ubwcp_hw_interrupt_clear(ubwcp->base, 2); //TBD: decode is bit-2 instead of bit-3 + } else { + ERR("unknown irq: %d", irq); + return IRQ_NONE; + } + + return IRQ_HANDLED; +} + +static int ubwcp_interrupt_register(struct platform_device *pdev, struct ubwcp_driver *ubwcp) +{ + int ret = 0; + struct device *dev = &pdev->dev; + + FENTRY(); + + ubwcp->irq_range_ck_rd = platform_get_irq(pdev, 0); + if (ubwcp->irq_range_ck_rd < 0) + return ubwcp->irq_range_ck_rd; + + ubwcp->irq_range_ck_wr = platform_get_irq(pdev, 1); + if (ubwcp->irq_range_ck_wr < 0) + return ubwcp->irq_range_ck_wr; + + ubwcp->irq_encode = platform_get_irq(pdev, 2); + if (ubwcp->irq_encode < 0) + return ubwcp->irq_encode; + + ubwcp->irq_decode = platform_get_irq(pdev, 3); + if (ubwcp->irq_decode < 0) + return ubwcp->irq_decode; + + DBG("got irqs: %d %d %d %d", ubwcp->irq_range_ck_rd, + ubwcp->irq_range_ck_wr, + ubwcp->irq_encode, + ubwcp->irq_decode); + + ret = devm_request_irq(dev, ubwcp->irq_range_ck_rd, ubwcp_irq_handler, 0, "ubwcp", ubwcp); + if (ret) { + ERR("request_irq() failed. irq: %d ret: %d", + ubwcp->irq_range_ck_rd, ret); + return ret; + } + + ret = devm_request_irq(dev, ubwcp->irq_range_ck_wr, ubwcp_irq_handler, 0, "ubwcp", ubwcp); + if (ret) { + ERR("request_irq() failed. irq: %d ret: %d", + ubwcp->irq_range_ck_wr, ret); + return ret; + } + + ret = devm_request_irq(dev, ubwcp->irq_encode, ubwcp_irq_handler, 0, "ubwcp", ubwcp); + if (ret) { + ERR("request_irq() failed. 
irq: %d ret: %d", + ubwcp->irq_encode, ret); + return ret; + } + + ret = devm_request_irq(dev, ubwcp->irq_decode, ubwcp_irq_handler, 0, "ubwcp", ubwcp); + if (ret) { + ERR("request_irq() failed. irq: %d ret: %d", + ubwcp->irq_decode, ret); + return ret; + } + + return ret; +} + +/* ubwcp device probe */ +static int qcom_ubwcp_probe(struct platform_device *pdev) +{ + int ret = 0; + struct ubwcp_driver *ubwcp; + struct device *ubwcp_dev = &pdev->dev; + + FENTRY(); + + ubwcp = devm_kzalloc(ubwcp_dev, sizeof(*ubwcp), GFP_KERNEL); + if (!ubwcp) { + ERR("devm_kzalloc() failed"); + return -ENOMEM; + } + + ubwcp->dev = &pdev->dev; + + ret = dma_set_mask_and_coherent(ubwcp->dev, DMA_BIT_MASK(64)); + +#ifdef UBWCP_USE_SMC + { + struct resource res; + + of_address_to_resource(ubwcp_dev->of_node, 0, &res); + ubwcp->base = (void __iomem *) res.start; + DBG("Using SMC calls. base: %p", ubwcp->base); + } +#else + ubwcp->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ubwcp->base)) { + ERR("devm ioremap() failed: %d", PTR_ERR(ubwcp->base)); + return PTR_ERR(ubwcp->base); + } + DBG("ubwcp->base: %p", ubwcp->base); +#endif + + ret = of_property_read_u64_index(ubwcp_dev->of_node, "ula_range", 0, &ubwcp->ula_pool_base); + if (ret) { + ERR("failed reading ula_range (base): %d", ret); + return ret; + } + DBG("ubwcp: ula_range: base = 0x%lx", ubwcp->ula_pool_base); + + ret = of_property_read_u64_index(ubwcp_dev->of_node, "ula_range", 1, &ubwcp->ula_pool_size); + if (ret) { + ERR("failed reading ula_range (size): %d", ret); + return ret; + } + DBG("ubwcp: ula_range: size = 0x%lx", ubwcp->ula_pool_size); + + /*TBD: remove later. 
reducing size for quick testing...*/ + ubwcp->ula_pool_size = 0x20000000; //500MB instead of 8GB + + if (ubwcp_interrupt_register(pdev, ubwcp)) + return -1; + + /* Regulator */ + ubwcp->vdd = devm_regulator_get(ubwcp_dev, "vdd"); + if (IS_ERR(ubwcp->vdd)) { + ret = PTR_ERR(ubwcp->vdd); + ERR("devm_regulator_get() failed: %d", ret); + return ret; + } + + mutex_init(&ubwcp->desc_lock); + mutex_init(&ubwcp->buf_table_lock); + mutex_init(&ubwcp->ula_lock); + mutex_init(&ubwcp->ubwcp_flush_lock); + mutex_init(&ubwcp->hw_range_ck_lock); + + + if (ubwcp_power(ubwcp, true)) + return -1; + + if (ubwcp_cdev_init(ubwcp)) + return -1; + + if (ubwcp_debugfs_init(ubwcp)) + return -1; + + /* create ULA pool */ + ubwcp->ula_pool = gen_pool_create(12, -1); + if (!ubwcp->ula_pool) { + ERR("failed gen_pool_create()"); + ret = -1; + goto err_pool_create; + } + + ret = gen_pool_add(ubwcp->ula_pool, ubwcp->ula_pool_base, ubwcp->ula_pool_size, -1); + if (ret) { + ERR("failed gen_pool_add(): %d", ret); + ret = -1; + goto err_pool_add; + } + + /* register the default config mmap function. 
*/ + ubwcp->mmap_config_fptr = msm_ubwcp_dma_buf_configure_mmap; + + hash_init(ubwcp->buf_table); + ubwcp_buf_desc_list_init(ubwcp); + image_format_init(ubwcp); + + /* one time hw init */ + ubwcp_hw_one_time_init(ubwcp->base); + ubwcp_hw_version(ubwcp->base, &ubwcp->hw_ver_major, &ubwcp->hw_ver_minor); + DBG("read version: major %d, minor %d", + ubwcp->hw_ver_major, ubwcp->hw_ver_minor); + + + if (ubwcp->hw_ver_major == 0) { + ERR("Failed to read HW version"); + ret = -1; + goto err_pool_add; + } + + /* set pdev->dev->driver_data = ubwcp */ + platform_set_drvdata(pdev, ubwcp); + me = ubwcp; + + /* enable all 4 interrupts */ + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_READ_ERROR, true); + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_WRITE_ERROR, true); + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_ENCODE_ERROR, true); + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_DECODE_ERROR, true); + + /* Turn OFF until buffers are allocated */ + if (ubwcp_power(ubwcp, false)) { + ret = -1; + goto err_power_off; + } + + ret = msm_ubwcp_set_ops(ubwcp_init_buffer, ubwcp_free_buffer, ubwcp_lock, ubwcp_unlock); + if (ret) { + ERR("msm_ubwcp_set_ops() failed: %d", ret); + goto err_power_off; + } else { + DBG("msm_ubwcp_set_ops(): success"); } + + return ret; + +err_power_off: + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_READ_ERROR, false); + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_WRITE_ERROR, false); + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_ENCODE_ERROR, false); + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_DECODE_ERROR, false); +err_pool_add: + gen_pool_destroy(ubwcp->ula_pool); +err_pool_create: + ubwcp_cdev_deinit(ubwcp); + + return ret; +} + + +/* buffer context bank device probe */ +static int ubwcp_probe_cb_buf(struct platform_device *pdev) +{ + struct ubwcp_driver *ubwcp; + + FENTRY(); + + ubwcp = dev_get_drvdata(pdev->dev.parent); + if (!ubwcp) { + ERR("failed to get ubwcp ptr"); + return -EINVAL; + } + + /* save the buffer cb 
device */ + ubwcp->dev_buf_cb = &pdev->dev; + return 0; +} + +/* descriptor context bank device probe */ +static int ubwcp_probe_cb_desc(struct platform_device *pdev) +{ + int ret = 0; + struct ubwcp_driver *ubwcp; + + FENTRY(); + + ubwcp = dev_get_drvdata(pdev->dev.parent); + if (!ubwcp) { + ERR("failed to get ubwcp ptr"); + return -EINVAL; + } + + ubwcp->buffer_desc_size = UBWCP_BUFFER_DESC_OFFSET * + UBWCP_BUFFER_DESC_COUNT; + + ubwcp->dev_desc_cb = &pdev->dev; + + dma_set_max_seg_size(ubwcp->dev_desc_cb, DMA_BIT_MASK(32)); + dma_set_seg_boundary(ubwcp->dev_desc_cb, (unsigned long)DMA_BIT_MASK(64)); + + /* Allocate buffer descriptors. UBWCP is iocoherent device. + * Thus we don't need to flush after updates to buffer descriptors. + */ + ubwcp->buffer_desc_base = dma_alloc_coherent(ubwcp->dev_desc_cb, + ubwcp->buffer_desc_size, + &ubwcp->buffer_desc_dma_handle, + GFP_KERNEL); + if (!ubwcp->buffer_desc_base) { + ERR("failed to allocate desc buffer"); + return -ENOMEM; + } + + DBG("desc_base = %p size = %zu", ubwcp->buffer_desc_base, + ubwcp->buffer_desc_size); + + //TBD: + ubwcp_power(ubwcp, true); + ubwcp_hw_set_buf_desc(ubwcp->base, (u64) ubwcp->buffer_desc_dma_handle, + UBWCP_BUFFER_DESC_OFFSET); + + ubwcp_power(ubwcp, false); + + return ret; +} + +/* buffer context bank device remove */ +static int ubwcp_remove_cb_buf(struct platform_device *pdev) +{ + struct ubwcp_driver *ubwcp; + + FENTRY(); + + ubwcp = dev_get_drvdata(pdev->dev.parent); + if (!ubwcp) { + ERR("failed to get ubwcp ptr"); + return -EINVAL; + } + + /* remove buf_cb reference */ + ubwcp->dev_buf_cb = NULL; + return 0; +} + +/* descriptor context bank device remove */ +static int ubwcp_remove_cb_desc(struct platform_device *pdev) +{ + struct ubwcp_driver *ubwcp; + + FENTRY(); + + ubwcp = dev_get_drvdata(pdev->dev.parent); + if (!ubwcp) { + ERR("failed to get ubwcp ptr"); + return -EINVAL; + } + + if (!ubwcp->dev_desc_cb) { + ERR("ubwcp->dev_desc_cb == NULL"); + return -1; + } + + 
ubwcp_power(ubwcp, true); + ubwcp_hw_set_buf_desc(ubwcp->base, 0x0, 0x0); + ubwcp_power(ubwcp, false); + + dma_free_coherent(ubwcp->dev_desc_cb, + ubwcp->buffer_desc_size, + ubwcp->buffer_desc_base, + ubwcp->buffer_desc_dma_handle); + ubwcp->buffer_desc_base = NULL; + ubwcp->buffer_desc_dma_handle = 0; + return 0; +} + +/* ubwcp device remove */ +static int qcom_ubwcp_remove(struct platform_device *pdev) +{ + size_t avail; + size_t psize; + struct ubwcp_driver *ubwcp; + + FENTRY(); + + /* get pdev->dev->driver_data = ubwcp */ + ubwcp = platform_get_drvdata(pdev); + if (!ubwcp) { + ERR("ubwcp == NULL"); + return -1; + } + + ubwcp_power(ubwcp, true); + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_READ_ERROR, false); + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_WRITE_ERROR, false); + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_ENCODE_ERROR, false); + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_DECODE_ERROR, false); + ubwcp_power(ubwcp, false); + + /* before destroying, make sure pool is empty. otherwise pool_destroy() panics. + * TBD: remove this check for production code and let it panic + */ + avail = gen_pool_avail(ubwcp->ula_pool); + psize = gen_pool_size(ubwcp->ula_pool); + if (psize != avail) { + ERR("gen_pool is not empty! avail: %zx size: %zx", avail, psize); + ERR("skipping pool destroy....cause it will PANIC. 
Fix this!!!!"); + WARN(1, "Fix this!"); + } else { + gen_pool_destroy(ubwcp->ula_pool); + } + ubwcp_debugfs_deinit(ubwcp); + ubwcp_cdev_deinit(ubwcp); + + return 0; +} + + +/* top level ubwcp device probe function */ +static int ubwcp_probe(struct platform_device *pdev) +{ + const char *compatible = ""; + + FENTRY(); + + if (of_device_is_compatible(pdev->dev.of_node, "qcom,ubwcp")) + return qcom_ubwcp_probe(pdev); + else if (of_device_is_compatible(pdev->dev.of_node, "qcom,ubwcp-context-bank-desc")) + return ubwcp_probe_cb_desc(pdev); + else if (of_device_is_compatible(pdev->dev.of_node, "qcom,ubwcp-context-bank-buf")) + return ubwcp_probe_cb_buf(pdev); + + of_property_read_string(pdev->dev.of_node, "compatible", &compatible); + ERR("unknown device: %s", compatible); + + WARN_ON(1); + return -EINVAL; +} + +/* top level ubwcp device remove function */ +static int ubwcp_remove(struct platform_device *pdev) +{ + const char *compatible = ""; + + FENTRY(); + + /* TBD: what if buffers are still allocated? locked? etc. + * also should turn off power? 
+ */ + + if (of_device_is_compatible(pdev->dev.of_node, "qcom,ubwcp")) + return qcom_ubwcp_remove(pdev); + else if (of_device_is_compatible(pdev->dev.of_node, "qcom,ubwcp-context-bank-desc")) + return ubwcp_remove_cb_desc(pdev); + else if (of_device_is_compatible(pdev->dev.of_node, "qcom,ubwcp-context-bank-buf")) + return ubwcp_remove_cb_buf(pdev); + + of_property_read_string(pdev->dev.of_node, "compatible", &compatible); + ERR("unknown device: %s", compatible); + + WARN_ON(1); + return -EINVAL; +} + + +static const struct of_device_id ubwcp_dt_match[] = { + {.compatible = "qcom,ubwcp"}, + {.compatible = "qcom,ubwcp-context-bank-desc"}, + {.compatible = "qcom,ubwcp-context-bank-buf"}, + {} +}; + +struct platform_driver ubwcp_platform_driver = { + .probe = ubwcp_probe, + .remove = ubwcp_remove, + .driver = { + .name = "qcom,ubwcp", + .of_match_table = ubwcp_dt_match, + }, +}; + +int ubwcp_init(void) +{ + int ret = 0; + + DBG("+++++++++++"); + + ret = platform_driver_register(&ubwcp_platform_driver); + if (ret) + ERR("platform_driver_register() failed: %d", ret); + + return ret; +} + +void ubwcp_exit(void) +{ + platform_driver_unregister(&ubwcp_platform_driver); + + DBG("-----------"); +} + +module_init(ubwcp_init); +module_exit(ubwcp_exit); + +MODULE_LICENSE("GPL"); diff --git a/ubwcp/ubwcp.h b/ubwcp/ubwcp.h new file mode 100644 index 0000000000..b2eab37beb --- /dev/null +++ b/ubwcp/ubwcp.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef __UBWCP_H_ +#define __UBWCP_H_ + +#include +#include + +#include "include/uapi/ubwcp_ioctl.h" + + +typedef int (*configure_mmap)(struct dma_buf *dmabuf, bool linear, phys_addr_t ula_pa_addr, + size_t ula_pa_size); + +/** + * Get UBWCP hardware version + * + * @param ver : ptr to ver struct where hw version will be + * copied + * + * @return int : 0 on success, otherwise error code + */ +int ubwcp_get_hw_version(struct ubwcp_ioctl_hw_version *ver); + +/** + * Configures ubwcp buffer with the provided buffer image + * attributes. This call must be done at least once before + * ubwcp_lock(). Attributes can be configured multiple times, + * but only during unlocked state. + * + * @param dmabuf : ptr to the dma buf + * @param attr : buffer attributes to set + * + * @return int : 0 on success, otherwise error code + */ +int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr); + +/** + * Get the currently configured attributes for the buffer + * + * @param dmabuf : ptr to the dma buf + * @param attr : pointer to location where image attributes + * for this buffer will be copied to. + * + * @return int : 0 on success, otherwise error code + */ +int ubwcp_get_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr); + +/** + * Set permanent range translation for the buffer. This reserves + * ubwcp address translation resources for the buffer until free + * is called. This may speed up lock()/unlock() calls as they + * don't need to configure address translations for the buffer. 
+ * + * @param dmabuf : ptr to the dma buf + * @param enable : true == enable, false == disable + * + * @return int : 0 on success, otherwise error code + */ +int ubwcp_set_perm_range_translation(struct dma_buf *dmabuf, bool enable); + +#endif /* __UBWCP_H_ */ diff --git a/ubwcp/ubwcp_hw.c b/ubwcp/ubwcp_hw.c new file mode 100644 index 0000000000..18e6d720c4 --- /dev/null +++ b/ubwcp/ubwcp_hw.c @@ -0,0 +1,360 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "ubwcp_hw.h" + +extern u32 ubwcp_debug_trace_enable; +//#define DBG(fmt, args...) +#define DBG(fmt, args...) \ + do { \ + if (ubwcp_debug_trace_enable) \ + pr_err("ubwcp: hw: %s(): " fmt "\n", __func__, ##args); \ + } while (0) +#define ERR(fmt, args...) \ + do { \ + if (ubwcp_debug_trace_enable) \ + pr_err("ubwcp: hw: %s(): ~~~ERROR~~~: " fmt "\n", __func__, ##args); \ + } while (0) + +MODULE_LICENSE("GPL"); + +#define PAGE_ADDR_4K(_x) ((_x) >> 12) + +/* register offsets from base */ +#define RANGE_LOWER 0x0000 +#define RANGE_HIGHER 0x0800 +#define DESC_BASE 0x1000 +#define DESC_BASE_STRIDE 0x1004 +#define CONFIG 0x1008 +#define ENCODER_CONFIG 0x100C +#define ENCODER_STATUS 0x1010 +#define DECODER_CONFIG 0x1014 +#define DECODER_STATUS 0x1018 +#define RANGE_CHECK_FAIL 0x101C +#define RANGE_CHECK_CONTROL 0x1020 +#define RANGE_CHECK_STATUS 0x1060 +#define FLUSH_CONTROL 0x10A0 +#define FLUSH_STATUS 0x10A4 +#define INTERRUPT_SET 0x10B0 +#define INTERRUPT_STATUS_READ 0x10C0 +#define INTERRUPT_STATUS_WRITE 0x10C4 +#define INTERRUPT_STATUS_ENCODE 0x10C8 +#define INTERRUPT_STATUS_DECODE 0x10CC +#define INTERRUPT_READ_SRC_LOW 0x1100 +#define INTERRUPT_READ_SRC_HIGH 0x1104 +#define INTERRUPT_WRITE_SRC_LOW 0x1108 +#define INTERRUPT_WRITE_SRC_HIGH 0x110C +#define INTERRUPT_ENCODE_SRC_LOW 0x1110 +#define INTERRUPT_ENCODE_SRC_HIGH 0x1114 +#define 
INTERRUPT_DECODE_SRC_LOW 0x1118 +#define INTERRUPT_DECODE_SRC_HIGH 0x111C +#define INTERRUPT_CLEAR 0x1120 +#define QNS4_PARAMS 0x1124 +#define OVERRIDE 0x112C +#define VERSION_CONTROL 0x1130 + + +/* read/write register */ +#if defined(UBWCP_USE_SMC) +#define UBWCP_REG_READ(_base, _offset) \ + ({u32 _reg = 0; int _ret; \ + _ret = qcom_scm_io_readl((phys_addr_t)(_base + _offset), &_reg); \ + if (_ret) \ + DBG("scm_read() failed: %d", _ret); \ + else \ + DBG("scm_read() : %p + 0x%x -> 0x%08x", _base, _offset, _reg); \ + _reg; }) + +#define UBWCP_REG_WRITE(_base, _offset, _value) \ + {int _ret;\ + _ret = qcom_scm_io_writel((phys_addr_t)(_base + _offset), _value); \ + if (_ret) \ + DBG("scm_write() failed: %d", _ret); \ + else \ + DBG("scm_write(): %p + 0x%x <- 0x%08x", _base, _offset, _value); \ + } +#elif defined(UBWCP_DEBUG_REG_RW) +#define UBWCP_REG_READ(_base, _offset) \ + ({u32 _reg; \ + _reg = ioread32(_base + _offset); \ + DBG("READ : 0x%x -> 0x%08x", _offset, _reg); \ + _reg; }) + +#define UBWCP_REG_WRITE(_base, _offset, _value) \ + { \ + DBG("WRITE: 0x%x <- 0x%08x", _offset, _value); \ + iowrite32(_value, _base + _offset); \ + } +#elif defined(UBWCP_DUMMY_REG_RW) +/* do nothing */ +#define UBWCP_REG_READ(_base, _offset) ((_base + _offset) ? 0x0 : 0x0) +#define UBWCP_REG_WRITE(_base, _offset, _value) ((_base + _offset + _value) ? 
0x0 : 0x0) + +#else + +#define UBWCP_REG_READ(_base, _offset) ioread32(_base + _offset) +#define UBWCP_REG_WRITE(_base, _offset, _value) iowrite32(_value, _base + _offset) + +#endif + +#define UBWCP_REG_READ_NO_DBG(_base, _offset) ioread32(_base + _offset) +#define UBWCP_REG_WRITE_NO_DBG(_base, _offset, _value) iowrite32(_value, _base + _offset) + + +void ubwcp_hw_interrupt_enable(void __iomem *base, u16 interrupt, bool enable) +{ + u32 value; + + value = UBWCP_REG_READ(base, INTERRUPT_SET); + + if (enable) + value = value | (1 << interrupt); + else + value = value & ~(1 << interrupt); + + UBWCP_REG_WRITE(base, INTERRUPT_SET, value); +} +EXPORT_SYMBOL(ubwcp_hw_interrupt_enable); + +void ubwcp_hw_interrupt_clear(void __iomem *base, u16 interrupt) +{ + UBWCP_REG_WRITE_NO_DBG(base, INTERRUPT_CLEAR, (1 << interrupt)); +} +EXPORT_SYMBOL(ubwcp_hw_interrupt_clear); + +int ubwcp_hw_interrupt_status(void __iomem *base, u16 interrupt) +{ + int value = -1; + + switch (interrupt) { + case INTERRUPT_READ_ERROR: + value = UBWCP_REG_READ(base, INTERRUPT_STATUS_READ) & 0x1; + break; + case INTERRUPT_WRITE_ERROR: + value = UBWCP_REG_READ(base, INTERRUPT_STATUS_WRITE) & 0x1; + break; + case INTERRUPT_DECODE_ERROR: + value = UBWCP_REG_READ(base, INTERRUPT_STATUS_DECODE) & 0x1; + break; + case INTERRUPT_ENCODE_ERROR: + value = UBWCP_REG_READ(base, INTERRUPT_STATUS_ENCODE) & 0x1; + break; + default: + /* TBD: fatal error? 
*/ + break; + } + + return value; +} + +/* returns the address which caused this interrupt */ +u64 ubwcp_hw_interrupt_src_address(void __iomem *base, u16 interrupt) +{ + u32 addr_low; + u32 addr_high; + + switch (interrupt) { + case INTERRUPT_READ_ERROR: + addr_low = UBWCP_REG_READ(base, INTERRUPT_READ_SRC_LOW); + addr_high = UBWCP_REG_READ(base, INTERRUPT_READ_SRC_HIGH) & 0xF; + break; + case INTERRUPT_WRITE_ERROR: + addr_low = UBWCP_REG_READ(base, INTERRUPT_WRITE_SRC_LOW); + addr_high = UBWCP_REG_READ(base, INTERRUPT_WRITE_SRC_HIGH) & 0xF; + break; + case INTERRUPT_DECODE_ERROR: + addr_low = UBWCP_REG_READ(base, INTERRUPT_DECODE_SRC_LOW); + addr_high = UBWCP_REG_READ(base, INTERRUPT_DECODE_SRC_HIGH) & 0xF; + break; + case INTERRUPT_ENCODE_ERROR: + addr_low = UBWCP_REG_READ(base, INTERRUPT_ENCODE_SRC_LOW); + addr_high = UBWCP_REG_READ(base, INTERRUPT_ENCODE_SRC_HIGH) & 0xF; + break; + default: + /* TBD: fatal error? */ + addr_low = 0x0; + addr_high = 0x0; + break; + } + + return ((addr_high << 31) | addr_low); +} +EXPORT_SYMBOL(ubwcp_hw_interrupt_src_address); + +/* + * @index: index of buffer (from 0 to 255) + * @pa : ULA PA start address + * @size : size of ULA PA address range + */ +void ubwcp_hw_set_range_check(void __iomem *base, u16 index, phys_addr_t pa, size_t size) +{ + u32 lower; + u32 higher; + + lower = PAGE_ADDR_4K(pa); + higher = PAGE_ADDR_4K(pa + size); + + UBWCP_REG_WRITE(base, RANGE_LOWER + index*4, lower); + UBWCP_REG_WRITE(base, RANGE_HIGHER + index*4, higher); +} +EXPORT_SYMBOL(ubwcp_hw_set_range_check); + +/* enable range ck: + * identify control register for this index. + * 32bits in each ctrl reg. 
upto 8 regs for 256 indexes + */ +void ubwcp_hw_enable_range_check(void __iomem *base, u16 index) +{ + u32 val; + u16 ctrl_reg = index >> 5; + + val = UBWCP_REG_READ(base, RANGE_CHECK_CONTROL + ctrl_reg*4); + val |= (1 << (index & 0x1F)); + UBWCP_REG_WRITE(base, RANGE_CHECK_CONTROL + ctrl_reg*4, val); +} +EXPORT_SYMBOL(ubwcp_hw_enable_range_check); + +void ubwcp_hw_disable_range_check(void __iomem *base, u16 index) +{ + u32 val; + u16 ctrl_reg = index >> 5; + + val = UBWCP_REG_READ(base, RANGE_CHECK_CONTROL + ctrl_reg*4); + val &= ~(1 << (index & 0x1F)); + UBWCP_REG_WRITE(base, RANGE_CHECK_CONTROL + ctrl_reg*4, val); +} +EXPORT_SYMBOL(ubwcp_hw_disable_range_check); + +void ubwcp_hw_set_buf_desc(void __iomem *base, u64 desc_addr, u16 desc_stride) +{ + UBWCP_REG_WRITE(base, DESC_BASE, PAGE_ADDR_4K(desc_addr)); + UBWCP_REG_WRITE(base, DESC_BASE_STRIDE, desc_stride); +} +EXPORT_SYMBOL(ubwcp_hw_set_buf_desc); + + +/* Value set here is returned upon read of an address that fails range check. + * Writes are ignored. + * Will also generate range_check_fail interrupt if enabled. 
+ * if we don't program, default value is: 0x92929292 + */ +void ubwcp_hw_set_default_range_check_value(void __iomem *base, u32 val) +{ + UBWCP_REG_WRITE(base, RANGE_CHECK_FAIL, val); +} + + +void ubwcp_hw_version(void __iomem *base, u32 *major, u32 *minor) +{ + u32 version; + + version = UBWCP_REG_READ(base, VERSION_CONTROL); + *major = version & 0xF; + *minor = (version & 0xF0) >> 4; +} +EXPORT_SYMBOL(ubwcp_hw_version); + +/* TBD: */ +void ubwcp_hw_macro_tile_config(void __iomem *base) +{ + //TODO: In future add in support for LP4 + //May be able to determine DDR version via call to + //of_fdt_get_ddrtype() + + /* + * For Lanai assume 4 Channel LP5 DDR so from HSR + * MAL Size 32B + * Highest Bank Bit 16 + * Level 1 Bank Swizzling Disable + * Level 2 Bank Swizzling Enable + * Level 3 Bank Swizzling Enable + * Bank Spreading Enable + * Macrotiling Configuration (Num Channels) 8 + */ + UBWCP_REG_WRITE(base, CONFIG, 0x1E3); +} + +/* TBD: */ +void ubwcp_hw_decoder_config(void __iomem *base) +{ + /* + * For Lanai assume AMSBC (UBWC4.4/4.3) algorithm is used == b11 + * For Lanai assume 4 Channel LP5 DDR so MAL Size 32B == b0 + */ + UBWCP_REG_WRITE(base, DECODER_CONFIG, 0x7); +} + +/* TBD: */ +void ubwcp_hw_encoder_config(void __iomem *base) +{ + /* + * For Lanai assume AMSBC (UBWC4.4/4.3) algorithm is used == b11 + * For Lanai assume 4 Channel LP5 DDR so MAL Size 32B == b0 + */ + UBWCP_REG_WRITE(base, ENCODER_CONFIG, 0x7); +} + + +int ubwcp_hw_flush(void __iomem *base) +{ + u32 flush_complete = 0; + u32 count = 20; + + UBWCP_REG_WRITE(base, FLUSH_CONTROL, 0x3); + do { + flush_complete = UBWCP_REG_READ(base, FLUSH_STATUS) & 0x1; + if (flush_complete) { + UBWCP_REG_WRITE(base, FLUSH_CONTROL, 0x0); + return 0; + } + udelay(100); + } while (count--); + + ERR("~~~~~ FLUSH FAILED ~~~~~"); + return -1; +} +EXPORT_SYMBOL(ubwcp_hw_flush); + + +void ubwcp_hw_power_vote_status(void __iomem *pwr_ctrl, u8 *vote, u8 *status) +{ + u32 reg; + + reg = UBWCP_REG_READ(pwr_ctrl, 0); + 
*vote = (reg & BIT(0)) >> 0; + *status = (reg & BIT(31)) >> 31; + DBG("pwr_ctrl reg: 0x%x (vote = %d status = %d)", reg, *vote, *status); +} + +void ubwcp_hw_one_time_init(void __iomem *base) +{ + u32 reg; + + /* hack: set dataless hazard override bit */ + reg = UBWCP_REG_READ(base, OVERRIDE); + UBWCP_REG_WRITE(base, OVERRIDE, 0x2000); + reg = UBWCP_REG_READ(base, OVERRIDE); + + /* Configure SID */ + reg = UBWCP_REG_READ(base, QNS4_PARAMS); + reg &= ~(0x3F); + reg |= 0x1; /* desc buffer */ + reg |= (0 << 3); /* pixel data */ + UBWCP_REG_WRITE(base, QNS4_PARAMS, reg); + reg = UBWCP_REG_READ(base, QNS4_PARAMS); + + ubwcp_hw_decoder_config(base); + ubwcp_hw_encoder_config(base); + ubwcp_hw_macro_tile_config(base); +} +EXPORT_SYMBOL(ubwcp_hw_one_time_init); diff --git a/ubwcp/ubwcp_hw.h b/ubwcp/ubwcp_hw.h new file mode 100644 index 0000000000..17a5e684fe --- /dev/null +++ b/ubwcp/ubwcp_hw.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __UBWCP_HW_H_ +#define __UBWCP_HW_H_ + +#define HW_BUFFER_FORMAT_RGBA 0x0 +#define HW_BUFFER_FORMAT_NV12 0x2 +#define HW_BUFFER_FORMAT_NV124R 0x4 +#define HW_BUFFER_FORMAT_P010 0x6 +#define HW_BUFFER_FORMAT_TP10 0x8 +#define HW_BUFFER_FORMAT_P016 0xA +#define HW_BUFFER_FORMAT_LINEAR 0xF + +/* interrupt id. 
also bit location for set/clear */ +#define INTERRUPT_READ_ERROR 0 +#define INTERRUPT_WRITE_ERROR 1 +#define INTERRUPT_DECODE_ERROR 2 +#define INTERRUPT_ENCODE_ERROR 3 + +/** + * struct msm_ubwcp_- UBWCP hardware instance + * dev:UBWCP device + * irq:Interrupt number + * clk:The bus clock for this IOMMU hardware instance + * pclk:The clock for the IOMMU IOMMU bus interconnect + */ +struct ubwcp_dev { + void __iomem *base; + struct device *dev; + int irq; + struct clk *clk; + struct clk *pclk; + /* TBD: + * struct list_head dev_node; + * struct list_head dom_node; + * struct list_head ctx_list; + * DECLARE_BITMAP(context_map, IOMMU_MAX_CBS) + * struct iommu_device iommu; + */ +}; + + +struct __packed ubwcp_hw_meta_metadata { + u64 uv_start_addr : 48; /* uv start address */ + u16 format : 16; /* format */ + u16 stride; /* image stride (bytes) */ + u16 stride_ubwcp; /* p010 stride for tp10 image (bytes) */ + u32 metadata_base_y; /* 24-bit page address */ + u32 metadata_base_uv; /* 24-bit page address */ + u16 buffer_y_offset; /* 4KB offset from meta_data_base_y */ + u16 buffer_uv_offset; /* 4KB offset from meta_data_base_y */ + u32 width_height; /* image width (bytes) */ +}; + +void ubwcp_hw_version(void __iomem *base, u32 *major, u32 *minor); +void ubwcp_hw_set_buf_desc(void __iomem *base, u64 desc_addr, u16 desc_stride); +void ubwcp_hw_enable_range_check(void __iomem *base, u16 index); +void ubwcp_hw_disable_range_check(void __iomem *base, u16 index); +void ubwcp_hw_set_range_check(void __iomem *base, u16 index, phys_addr_t pa, size_t size); +u64 ubwcp_hw_interrupt_src_address(void __iomem *base, u16 interrupt); +void ubwcp_hw_interrupt_clear(void __iomem *base, u16 interrupt); +void ubwcp_hw_interrupt_enable(void __iomem *base, u16 interrupt, bool enable); +void ubwcp_hw_power_on(void __iomem *pwr_ctrl, bool power_on); +void ubwcp_hw_one_time_init(void __iomem *base); +int ubwcp_hw_flush(void __iomem *base); + +//#define UBWCP_USE_SMC +#define UBWCP_DEBUG_REG_RW + 
+#endif /* __UBWCP_HW_H_ */ diff --git a/ubwcp/ubwcp_kernel_headers.py b/ubwcp/ubwcp_kernel_headers.py new file mode 100644 index 0000000000..20a71dceb8 --- /dev/null +++ b/ubwcp/ubwcp_kernel_headers.py @@ -0,0 +1,94 @@ + # Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + # Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + # + # This program is free software; you can redistribute it and/or modify it + # under the terms of the GNU General Public License version 2 as published by + # the Free Software Foundation. + # + # This program is distributed in the hope that it will be useful, but WITHOUT + # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + # more details. + # + # You should have received a copy of the GNU General Public License along with + # this program. If not, see . + +import argparse +import filecmp +import os +import re +import subprocess +import sys + +def run_headers_install(verbose, gen_dir, headers_install, unifdef, prefix, h): + if not h.startswith(prefix): + print('error: expected prefix [%s] on header [%s]' % (prefix, h)) + return False + + out_h = os.path.join(gen_dir, h[len(prefix):]) + (out_h_dirname, out_h_basename) = os.path.split(out_h) + env = os.environ.copy() + env["LOC_UNIFDEF"] = unifdef + cmd = ["sh", headers_install, h, out_h] + + if verbose: + print('run_headers_install: cmd is %s' % cmd) + + result = subprocess.call(cmd, env=env) + + if result != 0: + print('error: run_headers_install: cmd %s failed %d' % (cmd, result)) + return False + return True + +def gen_ubwcp_headers(verbose, gen_dir, headers_install, unifdef, ubwcp_include_uapi): + error_count = 0 + for h in ubwcp_include_uapi: + ubwcp_uapi_include_prefix = os.path.join(h.split('/include/uapi')[0], 'include', 'uapi') + os.sep + if not run_headers_install( + verbose, gen_dir, headers_install, unifdef, + ubwcp_uapi_include_prefix, 
h): error_count += 1 + return error_count + +def main(): + """Parse command line arguments and perform top level control.""" + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + + # Arguments that apply to every invocation of this script. + parser.add_argument( + '--verbose', action='store_true', + help='Print output that describes the workings of this script.') + parser.add_argument( + '--header_arch', required=True, + help='The arch for which to generate headers.') + parser.add_argument( + '--gen_dir', required=True, + help='Where to place the generated files.') + parser.add_argument( + '--ubwcp_include_uapi', required=True, nargs='*', + help='The list of techpack/*/include/uapi header files.') + parser.add_argument( + '--headers_install', required=True, + help='The headers_install tool to process input headers.') + parser.add_argument( + '--unifdef', + required=True, + help='The unifdef tool used by headers_install.') + + args = parser.parse_args() + + if args.verbose: + print('header_arch [%s]' % args.header_arch) + print('gen_dir [%s]' % args.gen_dir) + print('ubwcp_include_uapi [%s]' % args.ubwcp_include_uapi) + print('headers_install [%s]' % args.headers_install) + print('unifdef [%s]' % args.unifdef) + + return gen_ubwcp_headers(args.verbose, args.gen_dir, + args.headers_install, args.unifdef, args.ubwcp_include_uapi) + +if __name__ == '__main__': + sys.exit(main()) + diff --git a/ubwcp_kernel_product_board.mk b/ubwcp_kernel_product_board.mk index e69de29bb2..46f92c8012 100644 --- a/ubwcp_kernel_product_board.mk +++ b/ubwcp_kernel_product_board.mk @@ -0,0 +1,2 @@ +PRODUCT_PACKAGES += ubwcpx.ko + diff --git a/ubwcp_kernel_vendor_board.mk b/ubwcp_kernel_vendor_board.mk index e69de29bb2..c1babcfcbc 100644 --- a/ubwcp_kernel_vendor_board.mk +++ b/ubwcp_kernel_vendor_board.mk @@ -0,0 +1,2 @@ +BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/ubwcpx.ko + From 
010a4acc07698619add09dc25c97b044911784d0 Mon Sep 17 00:00:00 2001 From: Amol Jadi Date: Fri, 11 Nov 2022 17:29:04 -0800 Subject: [PATCH 04/35] ubwcp: r2 support and other misc changes - Program height in bytes for non-v1 hardware - Additional error checking at various places to prevent regulator call with NULL vdd - Set_ops fails when we do rmmod and then insmod ignore the return value so we can successfully insmod This is a hack that needs to be removed for final product Change-Id: Ib0252d7ddb7fa34aeeea7b1e1e3f81216e0cc5d3 Signed-off-by: Amol Jadi --- ubwcp/ubwcp.c | 82 ++++++++++++++++++++++++++++++++++-------------- ubwcp/ubwcp_hw.c | 32 ++++++++++++++++--- ubwcp/ubwcp_hw.h | 2 +- 3 files changed, 87 insertions(+), 29 deletions(-) diff --git a/ubwcp/ubwcp.c b/ubwcp/ubwcp.c index d2edfea97d..f7c3305146 100644 --- a/ubwcp/ubwcp.c +++ b/ubwcp/ubwcp.c @@ -53,7 +53,7 @@ MODULE_IMPORT_NS(DMA_BUF); } while (0) #define ERR(fmt, args...) pr_err("ubwcp: %s(): ~~~ERROR~~~: " fmt "\n", __func__, ##args) -#define FENTRY() DBG("ubwcp: %s()", __func__) +#define FENTRY() DBG("") #define META_DATA_PITCH_ALIGN 64 @@ -225,6 +225,16 @@ static int ubwcp_power(struct ubwcp_driver *ubwcp, bool enable) { int ret = 0; + if (!ubwcp) { + ERR("ubwcp ptr is NULL"); + return -1; + } + + if (!ubwcp->vdd) { + ERR("vdd is NULL"); + return -1; + } + if (enable) { ret = regulator_enable(ubwcp->vdd); if (ret < 0) { @@ -1363,7 +1373,14 @@ int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) mmdata->metadata_base_uv = PAGE_ADDR(iova_base + metadata_p0 + pixeldata_p0); mmdata->buffer_y_offset = PAGE_ADDR(metadata_p0); mmdata->buffer_uv_offset = PAGE_ADDR(metadata_p1); - mmdata->width_height = width_b << 16 | height_b; + + /* NOTE: For version 1.1, both width & height needs to be in bytes. + * For other versions, width in bytes & height in pixels. 
+ */ + if ((ubwcp->hw_ver_major == 1) && (ubwcp->hw_ver_minor == 1)) + mmdata->width_height = width_b << 16 | height_b; + else + mmdata->width_height = width_b << 16 | attr->height; print_mmdata_desc(mmdata); @@ -1583,6 +1600,7 @@ err: */ static int unlock_internal(struct ubwcp_buf *buf, enum dma_data_direction dir, bool free_buffer) { + int ret = 0; struct ubwcp_driver *ubwcp; DBG("current lock_count: %d", buf->lock_count); @@ -1606,16 +1624,16 @@ static int unlock_internal(struct ubwcp_buf *buf, enum dma_data_direction dir, b /* TODO: Use flush work around, remove when no longer needed */ ubwcp_flush_cache_wa(ubwcp->dev, buf->ula_pa, buf->ula_size); - /* TBD: confirm with HW if this should be done before or - * after disable_range_ck() - */ - ubwcp_flush(ubwcp); - - /* disable range check */ + /* disable range check with ubwcp flush */ DBG("disabling range check"); + //TBD: could combine these 2 locks into a single lock to make it simpler + mutex_lock(&ubwcp->ubwcp_flush_lock); mutex_lock(&ubwcp->hw_range_ck_lock); - ubwcp_hw_disable_range_check(ubwcp->base, buf->desc->idx); + ret = ubwcp_hw_disable_range_check_with_flush(ubwcp->base, buf->desc->idx); + if (ret) + ERR("disable_range_check_with_flush() failed: %d", ret); mutex_unlock(&ubwcp->hw_range_ck_lock); + mutex_unlock(&ubwcp->ubwcp_flush_lock); /* release descriptor if perm range xlation is not set */ if (!buf->perm) { @@ -1623,7 +1641,7 @@ static int unlock_internal(struct ubwcp_buf *buf, enum dma_data_direction dir, b buf->desc = NULL; } buf->locked = false; - return 0; + return ret; } @@ -2146,10 +2164,10 @@ static int qcom_ubwcp_probe(struct platform_device *pdev) /* Regulator */ ubwcp->vdd = devm_regulator_get(ubwcp_dev, "vdd"); - if (IS_ERR(ubwcp->vdd)) { + if (IS_ERR_OR_NULL(ubwcp->vdd)) { ret = PTR_ERR(ubwcp->vdd); ERR("devm_regulator_get() failed: %d", ret); - return ret; + return -1; } mutex_init(&ubwcp->desc_lock); @@ -2193,10 +2211,7 @@ static int qcom_ubwcp_probe(struct platform_device *pdev) 
/* one time hw init */ ubwcp_hw_one_time_init(ubwcp->base); ubwcp_hw_version(ubwcp->base, &ubwcp->hw_ver_major, &ubwcp->hw_ver_minor); - DBG("read version: major %d, minor %d", - ubwcp->hw_ver_major, ubwcp->hw_ver_minor); - - + pr_err("ubwcp: hw version: major %d, minor %d\n", ubwcp->hw_ver_major, ubwcp->hw_ver_minor); if (ubwcp->hw_ver_major == 0) { ERR("Failed to read HW version"); ret = -1; @@ -2205,7 +2220,6 @@ static int qcom_ubwcp_probe(struct platform_device *pdev) /* set pdev->dev->driver_data = ubwcp */ platform_set_drvdata(pdev, ubwcp); - me = ubwcp; /* enable all 4 interrupts */ ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_READ_ERROR, true); @@ -2221,11 +2235,17 @@ static int qcom_ubwcp_probe(struct platform_device *pdev) ret = msm_ubwcp_set_ops(ubwcp_init_buffer, ubwcp_free_buffer, ubwcp_lock, ubwcp_unlock); if (ret) { - ERR("msm_ubwcp_set_ops() failed: %d", ret); - goto err_power_off; + ERR("msm_ubwcp_set_ops() failed: %d, but IGNORED", ret); + /* TBD: ignore return error during testing phase. + * This allows us to rmmod/insmod for faster dev cycle. + * In final version: return error and de-register driver if set_ops fails. 
+ */ + ret = 0; + //goto err_power_off; } else { DBG("msm_ubwcp_set_ops(): success"); } + me = ubwcp; return ret; err_power_off: @@ -2237,7 +2257,6 @@ err_pool_add: gen_pool_destroy(ubwcp->ula_pool); err_pool_create: ubwcp_cdev_deinit(ubwcp); - return ret; } @@ -2297,14 +2316,31 @@ static int ubwcp_probe_cb_desc(struct platform_device *pdev) DBG("desc_base = %p size = %zu", ubwcp->buffer_desc_base, ubwcp->buffer_desc_size); - //TBD: - ubwcp_power(ubwcp, true); + ret = ubwcp_power(ubwcp, true); + if (ret) { + ERR("failed to power on"); + goto err; + } ubwcp_hw_set_buf_desc(ubwcp->base, (u64) ubwcp->buffer_desc_dma_handle, UBWCP_BUFFER_DESC_OFFSET); - ubwcp_power(ubwcp, false); + ret = ubwcp_power(ubwcp, false); + if (ret) { + ERR("failed to power off"); + goto err; + } return ret; + +err: + dma_free_coherent(ubwcp->dev_desc_cb, + ubwcp->buffer_desc_size, + ubwcp->buffer_desc_base, + ubwcp->buffer_desc_dma_handle); + ubwcp->buffer_desc_base = NULL; + ubwcp->buffer_desc_dma_handle = 0; + ubwcp->dev_desc_cb = NULL; + return -1; } /* buffer context bank device remove */ diff --git a/ubwcp/ubwcp_hw.c b/ubwcp/ubwcp_hw.c index 18e6d720c4..b955c507f8 100644 --- a/ubwcp/ubwcp_hw.c +++ b/ubwcp/ubwcp_hw.c @@ -224,16 +224,38 @@ void ubwcp_hw_enable_range_check(void __iomem *base, u16 index) } EXPORT_SYMBOL(ubwcp_hw_enable_range_check); -void ubwcp_hw_disable_range_check(void __iomem *base, u16 index) + +/* Disable range check with flush */ +int ubwcp_hw_disable_range_check_with_flush(void __iomem *base, u16 index) { + u32 flush_complete = 0; + u32 count = 20; u32 val; u16 ctrl_reg = index >> 5; - val = UBWCP_REG_READ(base, RANGE_CHECK_CONTROL + ctrl_reg*4); - val &= ~(1 << (index & 0x1F)); - UBWCP_REG_WRITE(base, RANGE_CHECK_CONTROL + ctrl_reg*4, val); + //assert flush + UBWCP_REG_WRITE(base, FLUSH_CONTROL, 0x3); + + //poll for flush done + do { + flush_complete = UBWCP_REG_READ(base, FLUSH_STATUS) & 0x1; + if (flush_complete) { + //disable range ck + val = UBWCP_REG_READ(base, 
RANGE_CHECK_CONTROL + ctrl_reg*4); + val &= ~(1 << (index & 0x1F)); + UBWCP_REG_WRITE(base, RANGE_CHECK_CONTROL + ctrl_reg*4, val); + + //clear flush + UBWCP_REG_WRITE(base, FLUSH_CONTROL, 0x0); + return 0; + } + udelay(100); + } while (count--); + + ERR("~~~~~ FLUSH FAILED ~~~~~"); + return -1; } -EXPORT_SYMBOL(ubwcp_hw_disable_range_check); +EXPORT_SYMBOL(ubwcp_hw_disable_range_check_with_flush); void ubwcp_hw_set_buf_desc(void __iomem *base, u64 desc_addr, u16 desc_stride) { diff --git a/ubwcp/ubwcp_hw.h b/ubwcp/ubwcp_hw.h index 17a5e684fe..d83191f38f 100644 --- a/ubwcp/ubwcp_hw.h +++ b/ubwcp/ubwcp_hw.h @@ -58,7 +58,7 @@ struct __packed ubwcp_hw_meta_metadata { void ubwcp_hw_version(void __iomem *base, u32 *major, u32 *minor); void ubwcp_hw_set_buf_desc(void __iomem *base, u64 desc_addr, u16 desc_stride); void ubwcp_hw_enable_range_check(void __iomem *base, u16 index); -void ubwcp_hw_disable_range_check(void __iomem *base, u16 index); +int ubwcp_hw_disable_range_check_with_flush(void __iomem *base, u16 index); void ubwcp_hw_set_range_check(void __iomem *base, u16 index, phys_addr_t pa, size_t size); u64 ubwcp_hw_interrupt_src_address(void __iomem *base, u16 interrupt); void ubwcp_hw_interrupt_clear(void __iomem *base, u16 interrupt); From ff2498ad42889d5f87a53105d1f660f3c7561674 Mon Sep 17 00:00:00 2001 From: Liam Mark Date: Tue, 15 Nov 2022 16:01:58 -0800 Subject: [PATCH 05/35] ubwcp: Keep UBWC-P powered on after buffer alloc Always keep UBWC-P powered on after first UBWC-P buffer is allocated. 
Change-Id: If1785e53f8b9015032094ba20ead7b2cb6f05f63 Signed-off-by: Liam Mark --- ubwcp/ubwcp.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ubwcp/ubwcp.c b/ubwcp/ubwcp.c index f7c3305146..2390df680e 100644 --- a/ubwcp/ubwcp.c +++ b/ubwcp/ubwcp.c @@ -1852,8 +1852,7 @@ static int ubwcp_free_buffer(struct dma_buf *dmabuf) } else { DBG("DONE: calling remove_memory() for ULA PA pool"); } - DBG("Calling power OFF ..."); - ubwcp_power(ubwcp, false); + DBG("Don't Call power OFF ..."); } mutex_unlock(&ubwcp->buf_table_lock); return ret; From ad3b9b9adabe9e6bac0d88e4283af2c12ba0f50e Mon Sep 17 00:00:00 2001 From: Amol Jadi Date: Tue, 22 Nov 2022 11:35:06 -0800 Subject: [PATCH 06/35] ubwcp: update one time config - enable padding & scc bit Change-Id: I6b74c98b73e13d34565527aeb11ad9ffd1c55ec6 Signed-off-by: Amol Jadi --- ubwcp/ubwcp_hw.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/ubwcp/ubwcp_hw.c b/ubwcp/ubwcp_hw.c index b955c507f8..ed3e485038 100644 --- a/ubwcp/ubwcp_hw.c +++ b/ubwcp/ubwcp_hw.c @@ -62,6 +62,7 @@ MODULE_LICENSE("GPL"); #define QNS4_PARAMS 0x1124 #define OVERRIDE 0x112C #define VERSION_CONTROL 0x1130 +#define SPARE 0x1188 /* read/write register */ @@ -363,9 +364,12 @@ void ubwcp_hw_one_time_init(void __iomem *base) u32 reg; /* hack: set dataless hazard override bit */ - reg = UBWCP_REG_READ(base, OVERRIDE); UBWCP_REG_WRITE(base, OVERRIDE, 0x2000); - reg = UBWCP_REG_READ(base, OVERRIDE); + + /* Spare reg config: set bit-9: SCC & bit-1: padding */ + reg = UBWCP_REG_READ(base, SPARE); + reg |= BIT(9) | BIT(1); + UBWCP_REG_WRITE(base, SPARE, reg); /* Configure SID */ reg = UBWCP_REG_READ(base, QNS4_PARAMS); @@ -373,7 +377,6 @@ void ubwcp_hw_one_time_init(void __iomem *base) reg |= 0x1; /* desc buffer */ reg |= (0 << 3); /* pixel data */ UBWCP_REG_WRITE(base, QNS4_PARAMS, reg); - reg = UBWCP_REG_READ(base, QNS4_PARAMS); ubwcp_hw_decoder_config(base); ubwcp_hw_encoder_config(base); From 
bbc9e0b644edfba273452a8c512a24218a11eefe Mon Sep 17 00:00:00 2001 From: Liam Mark Date: Thu, 1 Dec 2022 15:29:30 -0800 Subject: [PATCH 07/35] ubwcp: Switch to using offline_and_remove_memory remove_memory doesn't work when memory hotplug has been enabled, switch to using offline_and_remove_memory. Change-Id: Ia62efc9394326cde5bb0a5dd76ba811f9b1d4b17 Signed-off-by: Liam Mark --- ubwcp/ubwcp.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/ubwcp/ubwcp.c b/ubwcp/ubwcp.c index 2390df680e..0043e9673b 100644 --- a/ubwcp/ubwcp.c +++ b/ubwcp/ubwcp.c @@ -1842,15 +1842,16 @@ static int ubwcp_free_buffer(struct dma_buf *dmabuf) * care of flush. Just a note for now. Might need to add the * flush here for debug purpose. */ - DBG("Calling remove_memory() for ULA PA pool"); - ret = remove_memory(ubwcp->ula_pool_base, ubwcp->ula_pool_size); + DBG("Calling offline_and_remove_memory() for ULA PA pool"); + ret = offline_and_remove_memory(ubwcp->ula_pool_base, + ubwcp->ula_pool_size); if (ret) { - ERR("remove_memory failed st:0x%lx sz:0x%lx err: %d", + ERR("offline_and_remove_memory failed st:0x%lx sz:0x%lx err: %d", ubwcp->ula_pool_base, ubwcp->ula_pool_size, ret); goto err_remove_mem; } else { - DBG("DONE: calling remove_memory() for ULA PA pool"); + DBG("DONE: calling offline_and_remove_memory() for ULA PA pool"); } DBG("Don't Call power OFF ..."); } From a4283e7c90cb925134c9b107f9716f08e6582757 Mon Sep 17 00:00:00 2001 From: Amol Jadi Date: Mon, 5 Dec 2022 14:51:28 -0800 Subject: [PATCH 08/35] ubwcp: remove flush workaround - workaround is not needed for read-only cases on v1 Change-Id: I6977f0ff1e7990ecfdcea5319e17b47c0106be27 Signed-off-by: Amol Jadi --- ubwcp/ubwcp.c | 21 +-------------------- 1 file changed, 1 insertion(+), 20 deletions(-) diff --git a/ubwcp/ubwcp.c b/ubwcp/ubwcp.c index 0043e9673b..de70d1cb28 100644 --- a/ubwcp/ubwcp.c +++ b/ubwcp/ubwcp.c @@ -1446,21 +1446,6 @@ static struct ubwcp_desc *ubwcp_buf_desc_allocate(struct ubwcp_driver 
*ubwcp) return NULL; } -#define FLUSH_WA_SIZE 64 -#define FLUSH_WA_UDELAY 89 -void ubwcp_flush_cache_wa(struct device *dev, phys_addr_t paddr, size_t size) -{ - phys_addr_t cline = paddr; - int num_line = size / FLUSH_WA_SIZE; - int i; - - for (i = 0; i < num_line; i++) { - dma_sync_single_for_cpu(dev, cline, FLUSH_WA_SIZE, 0); - udelay(FLUSH_WA_UDELAY); - cline += FLUSH_WA_SIZE; - } -} - /** * Lock buffer for CPU access. This prepares ubwcp hw to allow * CPU access to the compressed buffer. It will perform @@ -1595,8 +1580,6 @@ err: /* This can be called as a result of external unlock() call or * internally if free() is called without unlock(). - * It can fail only for 1 reason: ubwcp_flush fails. currently we are ignoring the flush failure - * because it is hardware failure and no recovery path is defined. */ static int unlock_internal(struct ubwcp_buf *buf, enum dma_data_direction dir, bool free_buffer) { @@ -1620,9 +1603,7 @@ static int unlock_internal(struct ubwcp_buf *buf, enum dma_data_direction dir, b /* Flush/invalidate ULA PA from CPU caches */ //TBD: if (dir == WRITE or BIDIRECTION) - //dma_sync_single_for_device(ubwcp->dev, buf->ula_pa, buf->ula_size, dir); - /* TODO: Use flush work around, remove when no longer needed */ - ubwcp_flush_cache_wa(ubwcp->dev, buf->ula_pa, buf->ula_size); + dma_sync_single_for_device(ubwcp->dev, buf->ula_pa, buf->ula_size, dir); /* disable range check with ubwcp flush */ DBG("disabling range check"); From cb8c724b744717922ed93ae7bec59ae7440392b5 Mon Sep 17 00:00:00 2001 From: Liam Mark Date: Thu, 1 Dec 2022 17:18:26 -0800 Subject: [PATCH 09/35] ubwcp: Ensure UV plane is aligned Ensure linear aperture UV plane meets UBWC-P alignment requirements. 
Change-Id: Ibdc887c2b7c6f4981d5587b2caca1b2c653893ae Signed-off-by: Liam Mark --- ubwcp/ubwcp.c | 93 +++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 86 insertions(+), 7 deletions(-) diff --git a/ubwcp/ubwcp.c b/ubwcp/ubwcp.c index 0043e9673b..8edacf5bad 100644 --- a/ubwcp/ubwcp.c +++ b/ubwcp/ubwcp.c @@ -716,11 +716,23 @@ static size_t pixeldata_buf_sz(struct ubwcp_driver *ubwcp, return size; } +static int get_tile_height(struct ubwcp_driver *ubwcp, enum ubwcp_std_image_format format, + u8 plane) +{ + struct ubwcp_image_format_info f_info; + struct ubwcp_plane_info p_info; + + f_info = ubwcp->format_info[format]; + p_info = f_info.p_info[plane]; + return p_info.tilesize_p.height; +} + /* * plane: must be 0 or 1 (1st plane == 0, 2nd plane == 1) */ static size_t ubwcp_ula_size(struct ubwcp_driver *ubwcp, u16 format, - u32 stride_b, u32 scanlines, u8 plane) + u32 stride_b, u32 scanlines, u8 plane, + bool add_tile_pad) { size_t size; @@ -728,6 +740,13 @@ static size_t ubwcp_ula_size(struct ubwcp_driver *ubwcp, u16 format, /* UV plane */ if (plane == 1) scanlines = scanlines/2; + + if (add_tile_pad) { + int tile_height = get_tile_height(ubwcp, format, plane); + + /* Align plane size to plane tile height */ + scanlines = ((scanlines + tile_height - 1) / tile_height) * tile_height; + } size = stride_b*scanlines; DBG_BUF_ATTR("Size of plane-%u: (%u * %u) = %zu (0x%zx)", plane, stride_b, scanlines, size, size); @@ -838,6 +857,52 @@ unsigned int ubwcp_get_hw_image_format_value(u16 ioctl_image_format) } } +static int ubwcp_validate_uv_align(struct ubwcp_driver *ubwcp, + struct ubwcp_buffer_attrs *attr, + size_t ula_y_plane_size, + size_t uv_start_offset) +{ + int ret = 0; + size_t ula_y_plane_size_align; + size_t y_tile_align_bytes; + int y_tile_height; + int planes; + + /* Only validate UV align if there is both a Y and UV plane */ + planes = planes_in_format(to_std_format(attr->image_format)); + if (planes != 2) + return 0; + + /* Check it is cache line 
size aligned */ + if ((uv_start_offset % 64) != 0) { + ret = -EINVAL; + ERR("uv_start_offset %zu not cache line aligned", + uv_start_offset); + goto err; + } + + /* + * Check that UV plane does not overlap with any of the Y plane’s tiles + */ + y_tile_height = get_tile_height(ubwcp, to_std_format(attr->image_format), 0); + y_tile_align_bytes = y_tile_height * attr->stride; + ula_y_plane_size_align = ((ula_y_plane_size + y_tile_align_bytes - 1) / + y_tile_align_bytes) * y_tile_align_bytes; + + if (uv_start_offset < ula_y_plane_size_align) { + ret = -EINVAL; + ERR("uv offset %zu less than y plane align %zu for y plane size %zu", + uv_start_offset, ula_y_plane_size_align, + ula_y_plane_size); + goto err; + } + + return 0; + +err: + return ret; +} + /* calculate ULA buffer parms * TBD: how do we make sure uv_start address (not the offset) * is aligned per requirement: cache line @@ -845,6 +910,7 @@ unsigned int ubwcp_get_hw_image_format_value(u16 ioctl_image_format) static int ubwcp_calc_ula_params(struct ubwcp_driver *ubwcp, struct ubwcp_buffer_attrs *attr, size_t *ula_size, + size_t *ula_y_plane_size, size_t *uv_start_offset) { size_t size; @@ -881,24 +947,30 @@ static int ubwcp_calc_ula_params(struct ubwcp_driver *ubwcp, if (planes == 1) { /* uv_start beyond ULA range */ - size = ubwcp_ula_size(ubwcp, format, stride, scanlines, 0); + size = ubwcp_ula_size(ubwcp, format, stride, scanlines, 0, true); *uv_start_offset = size; + *ula_y_plane_size = size; } else { if (!missing_plane) { /* size for both planes and padding */ - size = ubwcp_ula_size(ubwcp, format, stride, scanlines, 0); + + /* Don't pad out Y plane as client would not expect this padding */ + size = ubwcp_ula_size(ubwcp, format, stride, scanlines, 0, false); + *ula_y_plane_size = size; size += planar_padding; *uv_start_offset = size; - size += ubwcp_ula_size(ubwcp, format, stride, scanlines, 1); + size += ubwcp_ula_size(ubwcp, format, stride, scanlines, 1, true); } else { if (missing_plane == 2) { /* 
Y-only image, set uv_start beyond ULA range */ - size = ubwcp_ula_size(ubwcp, format, stride, scanlines, 0); + size = ubwcp_ula_size(ubwcp, format, stride, scanlines, 0, true); *uv_start_offset = size; + *ula_y_plane_size = size; } else { /* first plane data is not there */ - size = ubwcp_ula_size(ubwcp, format, stride, scanlines, 1); + size = ubwcp_ula_size(ubwcp, format, stride, scanlines, 1, true); *uv_start_offset = 0; /* uv data is at the beginning */ + *ula_y_plane_size = 0; } } } @@ -1169,6 +1241,7 @@ int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) int ret = 0; size_t ula_size = 0; size_t uv_start_offset = 0; + size_t ula_y_plane_size = 0; phys_addr_t ula_pa = 0x0; struct ubwcp_buf *buf; struct ubwcp_driver *ubwcp; @@ -1259,12 +1332,18 @@ int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) DBG_BUF_ATTR(""); DBG_BUF_ATTR(""); DBG_BUF_ATTR("Calculating ula params -->"); - ret = ubwcp_calc_ula_params(ubwcp, attr, &ula_size, &uv_start_offset); + ret = ubwcp_calc_ula_params(ubwcp, attr, &ula_size, &ula_y_plane_size, &uv_start_offset); if (ret) { ERR("ubwcp_calc_ula_params() failed: %d", ret); goto err; } + ret = ubwcp_validate_uv_align(ubwcp, attr, ula_y_plane_size, uv_start_offset); + if (ret) { + ERR("ubwcp_validate_uv_align() failed: %d", ret); + goto err; + } + DBG_BUF_ATTR(""); DBG_BUF_ATTR(""); DBG_BUF_ATTR("Calculating ubwcp params -->"); From bb3a2245197963852dcad16f02df2f042b31ca32 Mon Sep 17 00:00:00 2001 From: Liam Mark Date: Wed, 4 Jan 2023 17:02:42 -0800 Subject: [PATCH 10/35] ubwcp: Add clocks support Add support in the UBWC-P driver to enable the clocks listed in the DT. 
Change-Id: Ib94eadb89cc7e9901641570ff0b25d45306b0a13 Signed-off-by: Liam Mark --- ubwcp/ubwcp.c | 89 ++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 88 insertions(+), 1 deletion(-) diff --git a/ubwcp/ubwcp.c b/ubwcp/ubwcp.c index fdd677623f..0f11bcbaa5 100644 --- a/ubwcp/ubwcp.c +++ b/ubwcp/ubwcp.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -24,6 +24,7 @@ #include #include #include +#include MODULE_IMPORT_NS(DMA_BUF); @@ -113,6 +114,9 @@ struct ubwcp_driver { void __iomem *base; //ubwcp base address struct regulator *vdd; + struct clk **clocks; + int num_clocks; + /* interrupts */ int irq_range_ck_rd; int irq_range_ck_wr; @@ -220,6 +224,67 @@ static void ubwcp_buf_desc_list_init(struct ubwcp_driver *ubwcp) } } +static int ubwcp_init_clocks(struct ubwcp_driver *ubwcp, struct device *dev) +{ + const char *cname; + struct property *prop; + int i; + + ubwcp->num_clocks = + of_property_count_strings(dev->of_node, "clock-names"); + + if (ubwcp->num_clocks < 1) { + ubwcp->num_clocks = 0; + return 0; + } + + ubwcp->clocks = devm_kzalloc(dev, + sizeof(*ubwcp->clocks) * ubwcp->num_clocks, GFP_KERNEL); + if (!ubwcp->clocks) + return -ENOMEM; + + i = 0; + of_property_for_each_string(dev->of_node, "clock-names", + prop, cname) { + struct clk *c = devm_clk_get(dev, cname); + + if (IS_ERR(c)) { + ERR("Couldn't get clock: %s\n", cname); + return PTR_ERR(c); + } + + ubwcp->clocks[i] = c; + + ++i; + } + return 0; +} + +static int ubwcp_enable_clocks(struct ubwcp_driver *ubwcp) +{ + int i, ret = 0; + + for (i = 0; i < ubwcp->num_clocks; ++i) { + ret = clk_prepare_enable(ubwcp->clocks[i]); + if (ret) { + ERR("Couldn't enable clock #%d\n", i); + while (i--) + clk_disable_unprepare(ubwcp->clocks[i]); + break; + } + } + + return ret; +} + +static void ubwcp_disable_clocks(struct 
ubwcp_driver *ubwcp) +{ + int i; + + for (i = ubwcp->num_clocks; i; --i) + clk_disable_unprepare(ubwcp->clocks[i - 1]); +} + /* UBWCP Power control */ static int ubwcp_power(struct ubwcp_driver *ubwcp, bool enable) { @@ -243,6 +308,16 @@ static int ubwcp_power(struct ubwcp_driver *ubwcp, bool enable) } else { DBG("regulator_enable() success"); } + + if (!ret) { + ret = ubwcp_enable_clocks(ubwcp); + if (ret) { + ERR("enable clocks failed: %d", ret); + regulator_disable(ubwcp->vdd); + } else { + DBG("enable clocks success"); + } + } } else { ret = regulator_disable(ubwcp->vdd); if (ret < 0) { @@ -251,7 +326,13 @@ static int ubwcp_power(struct ubwcp_driver *ubwcp, bool enable) } else { DBG("regulator_disable() success"); } + + if (!ret) { + ubwcp_disable_clocks(ubwcp); + DBG("disable clocks success"); + } } + return ret; } @@ -2230,6 +2311,12 @@ static int qcom_ubwcp_probe(struct platform_device *pdev) return -1; } + ret = ubwcp_init_clocks(ubwcp, ubwcp_dev); + if (ret) { + ERR("failed to initialize ubwcp clocks err: %d", ret); + return ret; + } + mutex_init(&ubwcp->desc_lock); mutex_init(&ubwcp->buf_table_lock); mutex_init(&ubwcp->ula_lock); From 34771eb84c260cc473853b69f34e10518b83d9fa Mon Sep 17 00:00:00 2001 From: Liam Mark Date: Thu, 5 Jan 2023 14:43:54 -0800 Subject: [PATCH 11/35] ubwcp: Add kernel error handler support Add kernel error handler support Change-Id: If7cdb1fe2858491ee6c523637eaf1bd6f7626e4d Signed-off-by: Liam Mark --- ubwcp/Android.bp | 4 +- ubwcp/Android.mk | 2 + ubwcp/{ => include/kernel}/ubwcp.h | 75 +++++++- ubwcp/ubwcp.c | 270 ++++++++++++++++++++++++++--- ubwcp/ubwcp_kernel_headers.py | 51 ++++-- 5 files changed, 363 insertions(+), 39 deletions(-) rename ubwcp/{ => include/kernel}/ubwcp.h (51%) diff --git a/ubwcp/Android.bp b/ubwcp/Android.bp index c51f9a23cf..2328a6e24f 100644 --- a/ubwcp/Android.bp +++ b/ubwcp/Android.bp @@ -1,9 +1,11 @@ headers_src = [ "include/uapi/*.h", + "include/kernel/*.h", ] ubwcp_headers_out = [ "ubwcp_ioctl.h", + 
"ubwcp.h", ] ubwcp_kernel_headers_verbose = "--verbose " @@ -21,7 +23,7 @@ genrule { ubwcp_kernel_headers_verbose + "--header_arch arm64 " + "--gen_dir $(genDir) " + - "--ubwcp_include_uapi $(locations include/uapi/*.h) " + + "--ubwcp_include $(locations include/uapi/*.h) $(locations include/kernel/*.h) " + "--unifdef $(location unifdef) " + "--headers_install $(location headers_install.sh)", out: ubwcp_headers_out, diff --git a/ubwcp/Android.mk b/ubwcp/Android.mk index 0f9256867d..18891ae55e 100644 --- a/ubwcp/Android.mk +++ b/ubwcp/Android.mk @@ -5,6 +5,8 @@ LOCAL_PATH := $(call my-dir) include $(CLEAR_VARS) # For incremental compilation LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/*) +LOCAL_EXPORT_KO_INCLUDE_DIRS := $(LOCAL_PATH)/include/uapi +LOCAL_EXPORT_KO_INCLUDE_DIRS += $(LOCAL_PATH)/include/kernel LOCAL_MODULE := ubwcpx.ko LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) include $(DLKM_DIR)/Build_external_kernelmodule.mk diff --git a/ubwcp/ubwcp.h b/ubwcp/include/kernel/ubwcp.h similarity index 51% rename from ubwcp/ubwcp.h rename to ubwcp/include/kernel/ubwcp.h index b2eab37beb..7fe7018dc4 100644 --- a/ubwcp/ubwcp.h +++ b/ubwcp/include/kernel/ubwcp.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef __UBWCP_H_ @@ -9,7 +9,7 @@ #include #include -#include "include/uapi/ubwcp_ioctl.h" +#include "../uapi/ubwcp_ioctl.h" typedef int (*configure_mmap)(struct dma_buf *dmabuf, bool linear, phys_addr_t ula_pa_addr, @@ -62,4 +62,75 @@ int ubwcp_get_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) */ int ubwcp_set_perm_range_translation(struct dma_buf *dmabuf, bool enable); +enum ubwcp_error { + UBWCP_ENCODE_ERROR = 0, + UBWCP_DECODE_ERROR, + UBWCP_RANGE_TRANSLATION_ERROR, + UBWCP_SMMU_FAULT, + UBWCP_UNKNOWN_ERROR, +}; + +enum iommu_cb_id { + UBWCP_DESC_CB_ID = 0, + UBWCP_BUF_CB_ID, + UBWCP_UNKNOWN_CB_ID, +}; + +struct ubwcp_enc_err_info { + struct dma_buf *dmabuf; + phys_addr_t ula_pa; +}; + +struct ubwcp_dec_err_info { + struct dma_buf *dmabuf; + phys_addr_t ula_pa; +}; + +struct ubwcp_translation_err_info { + struct dma_buf *dmabuf; + phys_addr_t ula_pa; + bool read; +}; + +struct ubwcp_smmu_fault_err_info { + struct dma_buf *dmabuf; + unsigned long iova; + enum iommu_cb_id iommu_dev_id; + int iommu_fault_flags; +}; + +struct unwcp_err_info { + enum ubwcp_error err_code; + union { + struct ubwcp_enc_err_info enc_err; + struct ubwcp_dec_err_info dec_err; + struct ubwcp_translation_err_info translation_err; + struct ubwcp_smmu_fault_err_info smmu_err; + }; +}; + +typedef void (*ubwcp_error_handler_t)(struct unwcp_err_info *err, void *data); + +/* + * Register an error handler + * + * @param client_id : not currently supported (pass in -1) + * @param handler : the error handler function which will be called when an + * error occurs + * @param data : data pointer provided with the error handler function + * + * @return int : 0 on success, otherwise error code + */ +int ubwcp_register_error_handler(u32 client_id, ubwcp_error_handler_t handler, + void *data); + +/* + * Unregister an error handler + * + * @param client_id : client id of handler to unregister (pass in -1) + * + * @return int : 0 on success, otherwise error code + */ +int 
ubwcp_unregister_error_handler(u32 client_id); + #endif /* __UBWCP_H_ */ diff --git a/ubwcp/ubwcp.c b/ubwcp/ubwcp.c index 0f11bcbaa5..56661e1604 100644 --- a/ubwcp/ubwcp.c +++ b/ubwcp/ubwcp.c @@ -25,10 +25,11 @@ #include #include #include +#include MODULE_IMPORT_NS(DMA_BUF); -#include "ubwcp.h" +#include "include/kernel/ubwcp.h" #include "ubwcp_hw.h" #include "include/uapi/ubwcp_ioctl.h" @@ -148,10 +149,13 @@ struct ubwcp_driver { struct ubwcp_image_format_info format_info[INFO_FORMAT_LIST_SIZE]; struct mutex desc_lock; /* allocate/free descriptors */ - struct mutex buf_table_lock; /* add/remove dma_buf into list of managed bufffers */ + spinlock_t buf_table_lock; /* add/remove dma_buf into list of managed bufffers */ + struct mutex mem_hotplug_lock; /* memory hotplug lock */ struct mutex ula_lock; /* allocate/free ula */ struct mutex ubwcp_flush_lock; /* ubwcp flush */ struct mutex hw_range_ck_lock; /* range ck */ + struct list_head err_handler_list; /* error handler list */ + spinlock_t err_handler_list_lock; /* err_handler_list lock */ }; struct ubwcp_buf { @@ -378,17 +382,18 @@ static struct ubwcp_buf *dma_buf_to_ubwcp_buf(struct dma_buf *dmabuf) { struct ubwcp_buf *buf = NULL; struct ubwcp_driver *ubwcp = ubwcp_get_driver(); + unsigned long flags; if (!dmabuf || !ubwcp) return NULL; - mutex_lock(&ubwcp->buf_table_lock); + spin_lock_irqsave(&ubwcp->buf_table_lock, flags); /* look up ubwcp_buf corresponding to this dma_buf */ hash_for_each_possible(ubwcp->buf_table, buf, hnode, (u64)dmabuf) { if (buf->dma_buf == dmabuf) break; } - mutex_unlock(&ubwcp->buf_table_lock); + spin_unlock_irqrestore(&ubwcp->buf_table_lock, flags); return buf; } @@ -432,6 +437,8 @@ static int ubwcp_init_buffer(struct dma_buf *dmabuf) int nid; struct ubwcp_buf *buf; struct ubwcp_driver *ubwcp = ubwcp_get_driver(); + unsigned long flags; + bool table_empty; FENTRY(); @@ -458,9 +465,11 @@ static int ubwcp_init_buffer(struct dma_buf *dmabuf) buf->dma_buf = dmabuf; buf->ubwcp = ubwcp; - 
mutex_lock(&ubwcp->buf_table_lock); - if (hash_empty(ubwcp->buf_table)) { - + mutex_lock(&ubwcp->mem_hotplug_lock); + spin_lock_irqsave(&ubwcp->buf_table_lock, flags); + table_empty = hash_empty(ubwcp->buf_table); + spin_unlock_irqrestore(&ubwcp->buf_table_lock, flags); + if (table_empty) { ret = ubwcp_power(ubwcp, true); if (ret) goto err_power_on; @@ -481,14 +490,16 @@ static int ubwcp_init_buffer(struct dma_buf *dmabuf) page_to_virt(pfn_to_page(PFN_DOWN(ubwcp->ula_pool_base)))); } } + spin_lock_irqsave(&ubwcp->buf_table_lock, flags); hash_add(ubwcp->buf_table, &buf->hnode, (u64)buf->dma_buf); - mutex_unlock(&ubwcp->buf_table_lock); + spin_unlock_irqrestore(&ubwcp->buf_table_lock, flags); + mutex_unlock(&ubwcp->mem_hotplug_lock); return ret; err_add_memory: ubwcp_power(ubwcp, false); err_power_on: - mutex_unlock(&ubwcp->buf_table_lock); + mutex_unlock(&ubwcp->mem_hotplug_lock); kfree(buf); if (!ret) ret = -1; @@ -1937,6 +1948,8 @@ static int ubwcp_free_buffer(struct dma_buf *dmabuf) int ret = 0; struct ubwcp_buf *buf; struct ubwcp_driver *ubwcp; + bool table_empty; + unsigned long flags; FENTRY(); @@ -1971,12 +1984,16 @@ static int ubwcp_free_buffer(struct dma_buf *dmabuf) if (buf->buf_attr_set) reset_buf_attrs(buf); - mutex_lock(&ubwcp->buf_table_lock); + mutex_lock(&ubwcp->mem_hotplug_lock); + spin_lock_irqsave(&ubwcp->buf_table_lock, flags); hash_del(&buf->hnode); + table_empty = hash_empty(ubwcp->buf_table); + spin_unlock_irqrestore(&ubwcp->buf_table_lock, flags); + kfree(buf); /* If this is the last buffer being freed, power off ubwcp */ - if (hash_empty(ubwcp->buf_table)) { + if (table_empty) { DBG("last buffer: ~~~~~~~~~~~"); /* TBD: If everything is working fine, ubwcp_flush() should not * be needed here. 
Each buffer free logic should be taking @@ -1996,11 +2013,11 @@ static int ubwcp_free_buffer(struct dma_buf *dmabuf) } DBG("Don't Call power OFF ..."); } - mutex_unlock(&ubwcp->buf_table_lock); + mutex_unlock(&ubwcp->mem_hotplug_lock); return ret; err_remove_mem: - mutex_unlock(&ubwcp->buf_table_lock); + mutex_unlock(&ubwcp->mem_hotplug_lock); if (!ret) ret = -1; DBG("returning error: %d", ret); @@ -2142,14 +2159,183 @@ static void ubwcp_cdev_deinit(struct ubwcp_driver *ubwcp) unregister_chrdev_region(ubwcp->devt, UBWCP_NUM_DEVICES); } +struct handler_node { + struct list_head list; + u32 client_id; + ubwcp_error_handler_t handler; + void *data; +}; + +int ubwcp_register_error_handler(u32 client_id, ubwcp_error_handler_t handler, + void *data) +{ + struct handler_node *node; + unsigned long flags; + struct ubwcp_driver *ubwcp = ubwcp_get_driver(); + + if (!ubwcp) + return -EINVAL; + + if (client_id != -1) + return -EINVAL; + + if (!handler) + return -EINVAL; + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + + node->client_id = client_id; + node->handler = handler; + node->data = data; + + spin_lock_irqsave(&ubwcp->err_handler_list_lock, flags); + list_add_tail(&node->list, &ubwcp->err_handler_list); + spin_unlock_irqrestore(&ubwcp->err_handler_list_lock, flags); + + return 0; +} +EXPORT_SYMBOL(ubwcp_register_error_handler); + +static void ubwcp_notify_error_handlers(struct unwcp_err_info *err) +{ + struct handler_node *node; + unsigned long flags; + struct ubwcp_driver *ubwcp = ubwcp_get_driver(); + + if (!ubwcp) + return; + + spin_lock_irqsave(&ubwcp->err_handler_list_lock, flags); + list_for_each_entry(node, &ubwcp->err_handler_list, list) + node->handler(err, node->data); + + spin_unlock_irqrestore(&ubwcp->err_handler_list_lock, flags); +} + +int ubwcp_unregister_error_handler(u32 client_id) +{ + int ret = -EINVAL; + struct handler_node *node; + unsigned long flags; + struct ubwcp_driver *ubwcp = ubwcp_get_driver(); + + if (!ubwcp) 
+ return -EINVAL; + + spin_lock_irqsave(&ubwcp->err_handler_list_lock, flags); + list_for_each_entry(node, &ubwcp->err_handler_list, list) + if (node->client_id == client_id) { + list_del(&node->list); + kfree(node); + ret = 0; + break; + } + spin_unlock_irqrestore(&ubwcp->err_handler_list_lock, flags); + + return ret; +} +EXPORT_SYMBOL(ubwcp_unregister_error_handler); + +/* get ubwcp_buf corresponding to the ULA PA*/ +static struct dma_buf *get_dma_buf_from_ulapa(phys_addr_t addr) +{ + struct ubwcp_buf *buf = NULL; + struct dma_buf *ret_buf = NULL; + struct ubwcp_driver *ubwcp = ubwcp_get_driver(); + unsigned long flags; + u32 i; + + if (!ubwcp) + return NULL; + + spin_lock_irqsave(&ubwcp->buf_table_lock, flags); + hash_for_each(ubwcp->buf_table, i, buf, hnode) { + if (buf->ula_pa <= addr && addr < buf->ula_pa + buf->ula_size) { + ret_buf = buf->dma_buf; + break; + } + } + spin_unlock_irqrestore(&ubwcp->buf_table_lock, flags); + + return ret_buf; +} + +/* get ubwcp_buf corresponding to the IOVA*/ +static struct dma_buf *get_dma_buf_from_iova(unsigned long addr) +{ + struct ubwcp_buf *buf = NULL; + struct dma_buf *ret_buf = NULL; + struct ubwcp_driver *ubwcp = ubwcp_get_driver(); + unsigned long flags; + u32 i; + + if (!ubwcp) + return NULL; + + spin_lock_irqsave(&ubwcp->buf_table_lock, flags); + hash_for_each(ubwcp->buf_table, i, buf, hnode) { + unsigned long iova_base = sg_dma_address(buf->sgt->sgl); + unsigned int iova_size = sg_dma_len(buf->sgt->sgl); + + if (iova_base <= addr && addr < iova_base + iova_size) { + ret_buf = buf->dma_buf; + break; + } + } + spin_unlock_irqrestore(&ubwcp->buf_table_lock, flags); + + return ret_buf; +} #define ERR_PRINT_COUNT_MAX 21 /* TBD: use proper rate limit for debug prints */ + +int ubwcp_iommu_fault_handler(struct iommu_domain *domain, struct device *dev, + unsigned long iova, int flags, void *data) +{ + int ret = 0; + struct unwcp_err_info err; + struct ubwcp_driver *ubwcp = ubwcp_get_driver(); + struct device *cb_dev = 
(struct device *)data; + + if (!ubwcp) { + ret = -EINVAL; + goto err; + } + + error_print_count++; + if (error_print_count < ERR_PRINT_COUNT_MAX) { + err.err_code = UBWCP_SMMU_FAULT; + + if (cb_dev == ubwcp->dev_desc_cb) + err.smmu_err.iommu_dev_id = UBWCP_DESC_CB_ID; + else if (cb_dev == ubwcp->dev_buf_cb) + err.smmu_err.iommu_dev_id = UBWCP_BUF_CB_ID; + else + err.smmu_err.iommu_dev_id = UBWCP_UNKNOWN_CB_ID; + + ERR("smmu fault error: iommu_dev_id:%d iova 0x%llx flags:0x%x", + err.smmu_err.iommu_dev_id, iova, flags); + err.smmu_err.dmabuf = get_dma_buf_from_iova(iova); + err.smmu_err.iova = iova; + err.smmu_err.iommu_fault_flags = flags; + ubwcp_notify_error_handlers(&err); + } + +err: + return ret; +} + + irqreturn_t ubwcp_irq_handler(int irq, void *ptr) { struct ubwcp_driver *ubwcp; void __iomem *base; u64 src; + phys_addr_t addr; + struct unwcp_err_info err; error_print_count++; @@ -2159,25 +2345,47 @@ irqreturn_t ubwcp_irq_handler(int irq, void *ptr) if (irq == ubwcp->irq_range_ck_rd) { if (error_print_count < ERR_PRINT_COUNT_MAX) { src = ubwcp_hw_interrupt_src_address(base, 0); - ERR("check range read error: src: 0x%llx", src << 6); + addr = src << 6; + ERR("check range read error: src: 0x%llx", addr); + err.err_code = UBWCP_RANGE_TRANSLATION_ERROR; + err.translation_err.dmabuf = get_dma_buf_from_ulapa(addr); + err.translation_err.ula_pa = addr; + err.translation_err.read = true; + ubwcp_notify_error_handlers(&err); } ubwcp_hw_interrupt_clear(ubwcp->base, 0); } else if (irq == ubwcp->irq_range_ck_wr) { if (error_print_count < ERR_PRINT_COUNT_MAX) { src = ubwcp_hw_interrupt_src_address(base, 1); - ERR("check range write error: src: 0x%llx", src << 6); + addr = src << 6; + ERR("check range write error: src: 0x%llx", addr); + err.err_code = UBWCP_RANGE_TRANSLATION_ERROR; + err.translation_err.dmabuf = get_dma_buf_from_ulapa(addr); + err.translation_err.ula_pa = addr; + err.translation_err.read = false; + ubwcp_notify_error_handlers(&err); } 
ubwcp_hw_interrupt_clear(ubwcp->base, 1); } else if (irq == ubwcp->irq_encode) { if (error_print_count < ERR_PRINT_COUNT_MAX) { src = ubwcp_hw_interrupt_src_address(base, 3); - ERR("encode error: src: 0x%llx", src << 6); + addr = src << 6; + ERR("encode error: src: 0x%llx", addr); + err.err_code = UBWCP_ENCODE_ERROR; + err.enc_err.dmabuf = get_dma_buf_from_ulapa(addr); + err.enc_err.ula_pa = addr; + ubwcp_notify_error_handlers(&err); } ubwcp_hw_interrupt_clear(ubwcp->base, 3); //TBD: encode is bit-3 instead of bit-2 } else if (irq == ubwcp->irq_decode) { if (error_print_count < ERR_PRINT_COUNT_MAX) { src = ubwcp_hw_interrupt_src_address(base, 2); - ERR("decode error: src: 0x%llx", src << 6); + addr = src << 6; + ERR("decode error: src: 0x%llx", addr); + err.err_code = UBWCP_DECODE_ERROR; + err.dec_err.dmabuf = get_dma_buf_from_ulapa(addr); + err.dec_err.ula_pa = addr; + ubwcp_notify_error_handlers(&err); } ubwcp_hw_interrupt_clear(ubwcp->base, 2); //TBD: decode is bit-2 instead of bit-3 } else { @@ -2300,6 +2508,16 @@ static int qcom_ubwcp_probe(struct platform_device *pdev) /*TBD: remove later. 
reducing size for quick testing...*/ ubwcp->ula_pool_size = 0x20000000; //500MB instead of 8GB + INIT_LIST_HEAD(&ubwcp->err_handler_list); + + mutex_init(&ubwcp->desc_lock); + spin_lock_init(&ubwcp->buf_table_lock); + mutex_init(&ubwcp->mem_hotplug_lock); + mutex_init(&ubwcp->ula_lock); + mutex_init(&ubwcp->ubwcp_flush_lock); + mutex_init(&ubwcp->hw_range_ck_lock); + spin_lock_init(&ubwcp->err_handler_list_lock); + if (ubwcp_interrupt_register(pdev, ubwcp)) return -1; @@ -2317,13 +2535,6 @@ static int qcom_ubwcp_probe(struct platform_device *pdev) return ret; } - mutex_init(&ubwcp->desc_lock); - mutex_init(&ubwcp->buf_table_lock); - mutex_init(&ubwcp->ula_lock); - mutex_init(&ubwcp->ubwcp_flush_lock); - mutex_init(&ubwcp->hw_range_ck_lock); - - if (ubwcp_power(ubwcp, true)) return -1; @@ -2412,6 +2623,7 @@ err_pool_create: static int ubwcp_probe_cb_buf(struct platform_device *pdev) { struct ubwcp_driver *ubwcp; + struct iommu_domain *domain = NULL; FENTRY(); @@ -2423,6 +2635,11 @@ static int ubwcp_probe_cb_buf(struct platform_device *pdev) /* save the buffer cb device */ ubwcp->dev_buf_cb = &pdev->dev; + + domain = iommu_get_domain_for_dev(ubwcp->dev_buf_cb); + if (domain) + iommu_set_fault_handler(domain, ubwcp_iommu_fault_handler, ubwcp->dev_buf_cb); + return 0; } @@ -2431,6 +2648,7 @@ static int ubwcp_probe_cb_desc(struct platform_device *pdev) { int ret = 0; struct ubwcp_driver *ubwcp; + struct iommu_domain *domain = NULL; FENTRY(); @@ -2477,6 +2695,10 @@ static int ubwcp_probe_cb_desc(struct platform_device *pdev) goto err; } + domain = iommu_get_domain_for_dev(ubwcp->dev_desc_cb); + if (domain) + iommu_set_fault_handler(domain, ubwcp_iommu_fault_handler, ubwcp->dev_desc_cb); + return ret; err: diff --git a/ubwcp/ubwcp_kernel_headers.py b/ubwcp/ubwcp_kernel_headers.py index 20a71dceb8..17e26bdfac 100644 --- a/ubwcp/ubwcp_kernel_headers.py +++ b/ubwcp/ubwcp_kernel_headers.py @@ -20,7 +20,7 @@ import re import subprocess import sys -def 
run_headers_install(verbose, gen_dir, headers_install, unifdef, prefix, h): +def run_uapi_headers_install(verbose, gen_dir, headers_install, unifdef, prefix, h): if not h.startswith(prefix): print('error: expected prefix [%s] on header [%s]' % (prefix, h)) return False @@ -32,22 +32,49 @@ def run_headers_install(verbose, gen_dir, headers_install, unifdef, prefix, h): cmd = ["sh", headers_install, h, out_h] if verbose: - print('run_headers_install: cmd is %s' % cmd) + print('run_uapi_headers_install: cmd is %s' % cmd) result = subprocess.call(cmd, env=env) if result != 0: - print('error: run_headers_install: cmd %s failed %d' % (cmd, result)) + print('error: run_uapi_headers_install: cmd %s failed %d' % (cmd, result)) return False return True -def gen_ubwcp_headers(verbose, gen_dir, headers_install, unifdef, ubwcp_include_uapi): +def run_kernel_headers_install(verbose, gen_dir, headers_install, unifdef, prefix, h): + if not h.startswith(prefix): + print('error: expected prefix [%s] on header [%s]' % (prefix, h)) + return False + + out_h = os.path.join(gen_dir, h[len(prefix):]) + (out_h_dirname, out_h_basename) = os.path.split(out_h) + env = os.environ.copy() + cmd = ["cp", h, out_h] + + if verbose: + print('run_kernel_headers_install: cmd is %s' % cmd) + + result = subprocess.call(cmd, env=env) + + if result != 0: + print('error: run_kernel_headers_install: cmd %s failed %d' % (cmd, result)) + return False + return True + +def gen_ubwcp_headers(verbose, gen_dir, headers_install, unifdef, ubwcp_include): error_count = 0 - for h in ubwcp_include_uapi: - ubwcp_uapi_include_prefix = os.path.join(h.split('/include/uapi')[0], 'include', 'uapi') + os.sep - if not run_headers_install( + for h in ubwcp_include: + if 'include/uapi' in h: + ubwcp_include_prefix = os.path.join(h.split('/include/uapi')[0], 'include', 'uapi') + os.sep + if not run_uapi_headers_install( verbose, gen_dir, headers_install, unifdef, - ubwcp_uapi_include_prefix, h): error_count += 1 + 
ubwcp_include_prefix, h): error_count += 1 + elif 'include/kernel' in h: + ubwcp_include_prefix = os.path.join(h.split('/include/kernel')[0], 'include', 'kernel') + os.sep + if not run_kernel_headers_install( + verbose, gen_dir, headers_install, unifdef, + ubwcp_include_prefix, h): error_count += 1 + return error_count def main(): @@ -67,8 +94,8 @@ def main(): '--gen_dir', required=True, help='Where to place the generated files.') parser.add_argument( - '--ubwcp_include_uapi', required=True, nargs='*', - help='The list of techpack/*/include/uapi header files.') + '--ubwcp_include', required=True, nargs='*', + help='The list of header files.') parser.add_argument( '--headers_install', required=True, help='The headers_install tool to process input headers.') @@ -82,12 +109,12 @@ def main(): if args.verbose: print('header_arch [%s]' % args.header_arch) print('gen_dir [%s]' % args.gen_dir) - print('ubwcp_include_uapi [%s]' % args.ubwcp_include_uapi) + print('ubwcp_include [%s]' % args.ubwcp_include) print('headers_install [%s]' % args.headers_install) print('unifdef [%s]' % args.unifdef) return gen_ubwcp_headers(args.verbose, args.gen_dir, - args.headers_install, args.unifdef, args.ubwcp_include_uapi) + args.headers_install, args.unifdef, args.ubwcp_include) if __name__ == '__main__': sys.exit(main()) From 47907f98a113e8e4446acd6f7a4850b9cfa98a32 Mon Sep 17 00:00:00 2001 From: Amol Jadi Date: Fri, 27 Jan 2023 15:44:19 -0800 Subject: [PATCH 12/35] ubwcp: bazel support Add support for bazel build system Change-Id: I9ec76c8341d7d1c909725016c0387beb13302570 Signed-off-by: Amol Jadi --- ubwcp/Android.mk | 2 +- ubwcp/BUILD.bazel | 4 ++++ ubwcp/Kbuild | 4 ++-- ubwcp/define_modules.bzl | 31 +++++++++++++++++++++++++++++++ ubwcp/{ubwcp.c => ubwcp_main.c} | 0 ubwcp_kernel_product_board.mk | 2 +- ubwcp_kernel_vendor_board.mk | 2 +- 7 files changed, 40 insertions(+), 5 deletions(-) create mode 100644 ubwcp/BUILD.bazel create mode 100644 ubwcp/define_modules.bzl rename 
ubwcp/{ubwcp.c => ubwcp_main.c} (100%) diff --git a/ubwcp/Android.mk b/ubwcp/Android.mk index 18891ae55e..d55687fae1 100644 --- a/ubwcp/Android.mk +++ b/ubwcp/Android.mk @@ -7,6 +7,6 @@ include $(CLEAR_VARS) LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/*) LOCAL_EXPORT_KO_INCLUDE_DIRS := $(LOCAL_PATH)/include/uapi LOCAL_EXPORT_KO_INCLUDE_DIRS += $(LOCAL_PATH)/include/kernel -LOCAL_MODULE := ubwcpx.ko +LOCAL_MODULE := ubwcp.ko LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) include $(DLKM_DIR)/Build_external_kernelmodule.mk diff --git a/ubwcp/BUILD.bazel b/ubwcp/BUILD.bazel new file mode 100644 index 0000000000..38d9bae832 --- /dev/null +++ b/ubwcp/BUILD.bazel @@ -0,0 +1,4 @@ +load(":define_modules.bzl", "define_modules") + +define_modules("pineapple", "consolidate") +define_modules("pineapple", "gki") diff --git a/ubwcp/Kbuild b/ubwcp/Kbuild index 2b69d972d6..a3ac4ad95b 100644 --- a/ubwcp/Kbuild +++ b/ubwcp/Kbuild @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -ubwcpx-objs := ubwcp.o ubwcp_hw.o -obj-m += ubwcpx.o +ubwcp-objs := ubwcp_main.o ubwcp_hw.o +obj-m += ubwcp.o diff --git a/ubwcp/define_modules.bzl b/ubwcp/define_modules.bzl new file mode 100644 index 0000000000..e603953a25 --- /dev/null +++ b/ubwcp/define_modules.bzl @@ -0,0 +1,31 @@ +load("//build/kernel/kleaf:kernel.bzl", "ddk_module") +load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir") + + +def define_modules(target, variant): + tv = "{}_{}".format(target, variant) + + ddk_module( + name = "{}_ubwcp".format(tv), + out = "ubwcp.ko", + srcs = [ + "ubwcp_main.c", + "ubwcp_hw.c", + "ubwcp_hw.h", + ], + hdrs=["include/uapi/ubwcp_ioctl.h", "include/kernel/ubwcp.h"], + deps = ["//msm-kernel:all_headers"], + includes = ["include", "include/kernel"], + kernel_build = "//msm-kernel:{}".format(tv), + visibility = ["//visibility:public"] + ) + + copy_to_dist_dir( + name = "{}_ubwcp_dist".format(tv), + data = [":{}_ubwcp".format(tv)], + dist_dir = 
"out/target/product/{}/dlkm/lib/modules/".format(target), + flat = True, + wipe_dist_dir = False, + allow_duplicate_filenames = False, + mode_overrides = {"**/*": "644"}, + ) diff --git a/ubwcp/ubwcp.c b/ubwcp/ubwcp_main.c similarity index 100% rename from ubwcp/ubwcp.c rename to ubwcp/ubwcp_main.c diff --git a/ubwcp_kernel_product_board.mk b/ubwcp_kernel_product_board.mk index 46f92c8012..1e68affb4d 100644 --- a/ubwcp_kernel_product_board.mk +++ b/ubwcp_kernel_product_board.mk @@ -1,2 +1,2 @@ -PRODUCT_PACKAGES += ubwcpx.ko +PRODUCT_PACKAGES += ubwcp.ko diff --git a/ubwcp_kernel_vendor_board.mk b/ubwcp_kernel_vendor_board.mk index c1babcfcbc..8f8535ebd5 100644 --- a/ubwcp_kernel_vendor_board.mk +++ b/ubwcp_kernel_vendor_board.mk @@ -1,2 +1,2 @@ -BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/ubwcpx.ko +BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/ubwcp.ko From 7a0417fe15ddc6062aa94b4715b2d40748571726 Mon Sep 17 00:00:00 2001 From: Liam Mark Date: Mon, 5 Dec 2022 16:28:16 -0800 Subject: [PATCH 13/35] Revert "ubwcp: Keep UBWC-P powered on after buffer alloc" This reverts commit ff2498ad42889d5f87a53105d1f660f3c7561674. No longer needed now that we are doing CMOs with an un-cached mapping. 
Change-Id: I927b93aaf869755a9fe36e77a24874e5b83a331d Signed-off-by: Liam Mark --- ubwcp/ubwcp_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ubwcp/ubwcp_main.c b/ubwcp/ubwcp_main.c index 56661e1604..d0768b1b31 100644 --- a/ubwcp/ubwcp_main.c +++ b/ubwcp/ubwcp_main.c @@ -2011,7 +2011,8 @@ static int ubwcp_free_buffer(struct dma_buf *dmabuf) } else { DBG("DONE: calling offline_and_remove_memory() for ULA PA pool"); } - DBG("Don't Call power OFF ..."); + DBG("Calling power OFF ..."); + ubwcp_power(ubwcp, false); } mutex_unlock(&ubwcp->mem_hotplug_lock); return ret; From bc6e2ce23d2bc398467e021a413c8dd624c64721 Mon Sep 17 00:00:00 2001 From: Liam Mark Date: Wed, 16 Nov 2022 11:27:04 -0800 Subject: [PATCH 14/35] ubwcp: Re-map as uncached and invalidate Re-map ULA PA as uncached and invalidate before power collapse. Change-Id: Ice676b98a472512c0be1ad83b5b592fec079e7ee Signed-off-by: Liam Mark --- ubwcp/ubwcp_main.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/ubwcp/ubwcp_main.c b/ubwcp/ubwcp_main.c index d0768b1b31..34e3ba207b 100644 --- a/ubwcp/ubwcp_main.c +++ b/ubwcp/ubwcp_main.c @@ -26,6 +26,7 @@ #include #include #include +#include MODULE_IMPORT_NS(DMA_BUF); @@ -2000,6 +2001,24 @@ static int ubwcp_free_buffer(struct dma_buf *dmabuf) * care of flush. Just a note for now. Might need to add the * flush here for debug purpose. 
*/ + + DBG("set_direct_map_range_uncached() for ULA PA pool st:0x%lx num pages:%lu", + ubwcp->ula_pool_base, ubwcp->ula_pool_size >> PAGE_SHIFT); + ret = set_direct_map_range_uncached((unsigned long)phys_to_virt( + ubwcp->ula_pool_base), ubwcp->ula_pool_size >> PAGE_SHIFT); + if (ret) { + ERR("set_direct_map_range_uncached failed st:0x%lx num pages:%lu err: %d", + ubwcp->ula_pool_base, + ubwcp->ula_pool_size >> PAGE_SHIFT, ret); + goto err_remove_mem; + } else { + DBG("DONE: calling set_direct_map_range_uncached() for ULA PA pool"); + } + + DBG("Calling dma_sync_single_for_cpu() for ULA PA pool"); + dma_sync_single_for_cpu(ubwcp->dev, ubwcp->ula_pool_base, ubwcp->ula_pool_size, + DMA_BIDIRECTIONAL); + DBG("Calling offline_and_remove_memory() for ULA PA pool"); ret = offline_and_remove_memory(ubwcp->ula_pool_base, ubwcp->ula_pool_size); From fcb70f93284b47ed7cf35ec29d227521cb933e4d Mon Sep 17 00:00:00 2001 From: Liam Mark Date: Thu, 26 Jan 2023 09:40:05 -0800 Subject: [PATCH 15/35] ubwcp: add ftrace support Add ftrace support to help with performance measurements. 
Change-Id: I21f762a9e25b2b745fbf4f72990c9932f823634b Signed-off-by: Liam Mark --- ubwcp/ubwcp_main.c | 56 +++++++++- ubwcp/ubwcp_trace.h | 252 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 306 insertions(+), 2 deletions(-) create mode 100644 ubwcp/ubwcp_trace.h diff --git a/ubwcp/ubwcp_main.c b/ubwcp/ubwcp_main.c index 34e3ba207b..2035090b4b 100644 --- a/ubwcp/ubwcp_main.c +++ b/ubwcp/ubwcp_main.c @@ -33,6 +33,8 @@ MODULE_IMPORT_NS(DMA_BUF); #include "include/kernel/ubwcp.h" #include "ubwcp_hw.h" #include "include/uapi/ubwcp_ioctl.h" +#define CREATE_TRACE_POINTS +#include "ubwcp_trace.h" #define UBWCP_NUM_DEVICES 1 #define UBWCP_DEVICE_NAME "ubwcp" @@ -442,23 +444,29 @@ static int ubwcp_init_buffer(struct dma_buf *dmabuf) bool table_empty; FENTRY(); + trace_ubwcp_init_buffer_start(dmabuf); - if (!ubwcp) + if (!ubwcp) { + trace_ubwcp_init_buffer_end(dmabuf); return -1; + } if (!dmabuf) { ERR("NULL dmabuf input ptr"); + trace_ubwcp_init_buffer_end(dmabuf); return -EINVAL; } if (dma_buf_to_ubwcp_buf(dmabuf)) { ERR("dma_buf already initialized for ubwcp"); + trace_ubwcp_init_buffer_end(dmabuf); return -EEXIST; } buf = kzalloc(sizeof(*buf), GFP_KERNEL); if (!buf) { ERR("failed to alloc for new ubwcp_buf"); + trace_ubwcp_init_buffer_end(dmabuf); return -ENOMEM; } @@ -477,7 +485,9 @@ static int ubwcp_init_buffer(struct dma_buf *dmabuf) nid = memory_add_physaddr_to_nid(ubwcp->ula_pool_base); DBG("calling add_memory()..."); + trace_ubwcp_add_memory_start(dmabuf, ubwcp->ula_pool_size); ret = add_memory(nid, ubwcp->ula_pool_base, ubwcp->ula_pool_size, MHP_NONE); + trace_ubwcp_add_memory_end(dmabuf, ubwcp->ula_pool_size); if (ret) { ERR("add_memory() failed st:0x%lx sz:0x%lx err: %d", ubwcp->ula_pool_base, @@ -495,6 +505,7 @@ static int ubwcp_init_buffer(struct dma_buf *dmabuf) hash_add(ubwcp->buf_table, &buf->hnode, (u64)buf->dma_buf); spin_unlock_irqrestore(&ubwcp->buf_table_lock, flags); mutex_unlock(&ubwcp->mem_hotplug_lock); + 
trace_ubwcp_init_buffer_end(dmabuf); return ret; err_add_memory: @@ -504,6 +515,7 @@ err_power_on: kfree(buf); if (!ret) ret = -1; + trace_ubwcp_init_buffer_end(dmabuf); return ret; } @@ -1354,20 +1366,24 @@ int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) enum ubwcp_std_image_format std_image_format; FENTRY(); + trace_ubwcp_set_buf_attrs_start(dmabuf); if (!dmabuf) { ERR("NULL dmabuf input ptr"); + trace_ubwcp_set_buf_attrs_end(dmabuf); return -EINVAL; } if (!attr) { ERR("NULL attr ptr"); + trace_ubwcp_set_buf_attrs_end(dmabuf); return -EINVAL; } buf = dma_buf_to_ubwcp_buf(dmabuf); if (!buf) { ERR("No corresponding ubwcp_buf for the passed in dma_buf"); + trace_ubwcp_set_buf_attrs_end(dmabuf); return -EINVAL; } @@ -1412,6 +1428,7 @@ int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) reset_buf_attrs(buf); mutex_unlock(&buf->lock); + trace_ubwcp_set_buf_attrs_end(dmabuf); return 0; } @@ -1560,6 +1577,7 @@ int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) buf->buf_attr_set = true; //TBD: UBWCP_ASSERT(!buf->perm); mutex_unlock(&buf->lock); + trace_ubwcp_set_buf_attrs_end(dmabuf); return 0; err: @@ -1567,6 +1585,7 @@ err: mutex_unlock(&buf->lock); if (!ret) ret = -1; + trace_ubwcp_set_buf_attrs_end(dmabuf); return ret; } EXPORT_SYMBOL(ubwcp_set_buf_attrs); @@ -1639,20 +1658,25 @@ static int ubwcp_lock(struct dma_buf *dmabuf, enum dma_data_direction dir) struct ubwcp_driver *ubwcp; FENTRY(); + trace_ubwcp_lock_start(dmabuf); if (!dmabuf) { ERR("NULL dmabuf input ptr"); + trace_ubwcp_lock_end(dmabuf); return -EINVAL; } + if (!valid_dma_direction(dir)) { ERR("invalid direction: %d", dir); + trace_ubwcp_lock_end(dmabuf); return -EINVAL; } buf = dma_buf_to_ubwcp_buf(dmabuf); if (!buf) { ERR("ubwcp_buf ptr not found"); + trace_ubwcp_lock_end(dmabuf); return -1; } @@ -1723,13 +1747,17 @@ static int ubwcp_lock(struct dma_buf *dmabuf, enum dma_data_direction dir) * we force completion 
of that and then we also cpu invalidate which * will get rid of that line. */ + trace_ubwcp_hw_flush_start(dmabuf, buf->ula_size); ubwcp_flush(ubwcp); + trace_ubwcp_hw_flush_end(dmabuf, buf->ula_size); /* Flush/invalidate ULA PA from CPU caches * TBD: if (dir == READ or BIDIRECTION) //NOT for write * -- Confirm with Chris if this can be skipped for write */ + trace_ubwcp_dma_sync_single_for_cpu_start(dmabuf, buf->ula_size); dma_sync_single_for_cpu(ubwcp->dev, buf->ula_pa, buf->ula_size, dir); + trace_ubwcp_dma_sync_single_for_cpu_end(dmabuf, buf->ula_size); buf->lock_dir = dir; buf->locked = true; } else { @@ -1741,12 +1769,14 @@ static int ubwcp_lock(struct dma_buf *dmabuf, enum dma_data_direction dir) buf->lock_count++; DBG("new lock_count: %d", buf->lock_count); mutex_unlock(&buf->lock); + trace_ubwcp_lock_end(dmabuf); return ret; err: mutex_unlock(&buf->lock); if (!ret) ret = -1; + trace_ubwcp_lock_end(dmabuf); return ret; } @@ -1775,14 +1805,18 @@ static int unlock_internal(struct ubwcp_buf *buf, enum dma_data_direction dir, b /* Flush/invalidate ULA PA from CPU caches */ //TBD: if (dir == WRITE or BIDIRECTION) + trace_ubwcp_dma_sync_single_for_device_start(buf->dma_buf, buf->ula_size); dma_sync_single_for_device(ubwcp->dev, buf->ula_pa, buf->ula_size, dir); + trace_ubwcp_dma_sync_single_for_device_end(buf->dma_buf, buf->ula_size); /* disable range check with ubwcp flush */ DBG("disabling range check"); //TBD: could combine these 2 locks into a single lock to make it simpler mutex_lock(&ubwcp->ubwcp_flush_lock); mutex_lock(&ubwcp->hw_range_ck_lock); + trace_ubwcp_hw_flush_start(buf->dma_buf, buf->ula_size); ret = ubwcp_hw_disable_range_check_with_flush(ubwcp->base, buf->desc->idx); + trace_ubwcp_hw_flush_end(buf->dma_buf, buf->ula_size); if (ret) ERR("disable_range_check_with_flush() failed: %d", ret); mutex_unlock(&ubwcp->hw_range_ck_lock); @@ -1816,25 +1850,29 @@ static int ubwcp_unlock(struct dma_buf *dmabuf, enum dma_data_direction dir) int ret; FENTRY(); 
- + trace_ubwcp_unlock_start(dmabuf); if (!dmabuf) { ERR("NULL dmabuf input ptr"); + trace_ubwcp_unlock_end(dmabuf); return -EINVAL; } if (!valid_dma_direction(dir)) { ERR("invalid direction: %d", dir); + trace_ubwcp_unlock_end(dmabuf); return -EINVAL; } buf = dma_buf_to_ubwcp_buf(dmabuf); if (!buf) { ERR("ubwcp_buf not found"); + trace_ubwcp_unlock_end(dmabuf); return -1; } if (!buf->locked) { ERR("unlock() called on buffer which not in locked state"); + trace_ubwcp_unlock_end(dmabuf); return -1; } @@ -1842,6 +1880,7 @@ static int ubwcp_unlock(struct dma_buf *dmabuf, enum dma_data_direction dir) mutex_lock(&buf->lock); ret = unlock_internal(buf, dir, false); mutex_unlock(&buf->lock); + trace_ubwcp_unlock_end(dmabuf); return ret; } @@ -1953,15 +1992,18 @@ static int ubwcp_free_buffer(struct dma_buf *dmabuf) unsigned long flags; FENTRY(); + trace_ubwcp_free_buffer_start(dmabuf); if (!dmabuf) { ERR("NULL dmabuf input ptr"); + trace_ubwcp_free_buffer_end(dmabuf); return -EINVAL; } buf = dma_buf_to_ubwcp_buf(dmabuf); if (!buf) { ERR("ubwcp_buf ptr not found"); + trace_ubwcp_free_buffer_end(dmabuf); return -1; } @@ -2004,8 +2046,10 @@ static int ubwcp_free_buffer(struct dma_buf *dmabuf) DBG("set_direct_map_range_uncached() for ULA PA pool st:0x%lx num pages:%lu", ubwcp->ula_pool_base, ubwcp->ula_pool_size >> PAGE_SHIFT); + trace_ubwcp_set_direct_map_range_uncached_start(dmabuf, ubwcp->ula_pool_size); ret = set_direct_map_range_uncached((unsigned long)phys_to_virt( ubwcp->ula_pool_base), ubwcp->ula_pool_size >> PAGE_SHIFT); + trace_ubwcp_set_direct_map_range_uncached_end(dmabuf, ubwcp->ula_pool_size); if (ret) { ERR("set_direct_map_range_uncached failed st:0x%lx num pages:%lu err: %d", ubwcp->ula_pool_base, @@ -2016,12 +2060,16 @@ static int ubwcp_free_buffer(struct dma_buf *dmabuf) } DBG("Calling dma_sync_single_for_cpu() for ULA PA pool"); + trace_ubwcp_dma_sync_single_for_cpu_start(dmabuf, ubwcp->ula_pool_size); dma_sync_single_for_cpu(ubwcp->dev, 
ubwcp->ula_pool_base, ubwcp->ula_pool_size, DMA_BIDIRECTIONAL); + trace_ubwcp_dma_sync_single_for_cpu_end(dmabuf, ubwcp->ula_pool_size); DBG("Calling offline_and_remove_memory() for ULA PA pool"); + trace_ubwcp_offline_and_remove_memory_start(dmabuf, ubwcp->ula_pool_size); ret = offline_and_remove_memory(ubwcp->ula_pool_base, ubwcp->ula_pool_size); + trace_ubwcp_offline_and_remove_memory_end(dmabuf, ubwcp->ula_pool_size); if (ret) { ERR("offline_and_remove_memory failed st:0x%lx sz:0x%lx err: %d", ubwcp->ula_pool_base, @@ -2034,6 +2082,7 @@ static int ubwcp_free_buffer(struct dma_buf *dmabuf) ubwcp_power(ubwcp, false); } mutex_unlock(&ubwcp->mem_hotplug_lock); + trace_ubwcp_free_buffer_end(dmabuf); return ret; err_remove_mem: @@ -2041,6 +2090,7 @@ err_remove_mem: if (!ret) ret = -1; DBG("returning error: %d", ret); + trace_ubwcp_free_buffer_end(dmabuf); return ret; } @@ -2829,6 +2879,7 @@ static int ubwcp_probe(struct platform_device *pdev) const char *compatible = ""; FENTRY(); + trace_ubwcp_probe(pdev); if (of_device_is_compatible(pdev->dev.of_node, "qcom,ubwcp")) return qcom_ubwcp_probe(pdev); @@ -2850,6 +2901,7 @@ static int ubwcp_remove(struct platform_device *pdev) const char *compatible = ""; FENTRY(); + trace_ubwcp_remove(pdev); /* TBD: what if buffers are still allocated? locked? etc. * also should turn off power? diff --git a/ubwcp/ubwcp_trace.h b/ubwcp/ubwcp_trace.h new file mode 100644 index 0000000000..6c6eb146c4 --- /dev/null +++ b/ubwcp/ubwcp_trace.h @@ -0,0 +1,252 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#if !defined(TRACE_UBWCP_H) || defined(TRACE_HEADER_MULTI_READ) +#define TRACE_UBWCP_H + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ubwcp + +/* Path must be relative to location of 'define_trace.h' header in kernel */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../../../vendor/qcom/opensource/mm-sys-kernel/ubwcp + +/* Name of trace header file */ +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE ubwcp_trace + +#include + +struct dma_buf; +struct platform_device; + +DECLARE_EVENT_CLASS(ubwcp_platform_device_event, + + TP_PROTO(struct platform_device *pdev), + + TP_ARGS(pdev), + + TP_STRUCT__entry( + __field(struct platform_device *, pdev) + ), + + TP_fast_assign( + __entry->pdev = pdev; + ), + + TP_printk("platform_device:0x%lx", + __entry->pdev) +); + +DEFINE_EVENT(ubwcp_platform_device_event, ubwcp_probe, + + TP_PROTO(struct platform_device *pdev), + + TP_ARGS(pdev) +); + +DEFINE_EVENT(ubwcp_platform_device_event, ubwcp_remove, + + TP_PROTO(struct platform_device *pdev), + + TP_ARGS(pdev) +); + +DECLARE_EVENT_CLASS(ubwcp_dmabuf_event, + + TP_PROTO(struct dma_buf *dbuf_addr), + + TP_ARGS(dbuf_addr), + + TP_STRUCT__entry( + __field(struct dma_buf *, dbuf_addr) + ), + + TP_fast_assign( + __entry->dbuf_addr = dbuf_addr; + ), + + TP_printk("dma-buf:0x%lx", + __entry->dbuf_addr) +); + +DECLARE_EVENT_CLASS(ubwcp_dmabuf_size_event, + + TP_PROTO(struct dma_buf *dbuf_addr, size_t size), + + TP_ARGS(dbuf_addr, size), + + TP_STRUCT__entry( + __field(struct dma_buf *, dbuf_addr) + __field(size_t, size) + ), + + TP_fast_assign( + __entry->dbuf_addr = dbuf_addr; + __entry->size = size; + ), + + TP_printk("dma-buf:0x%lx size:%zu", + __entry->dbuf_addr, __entry->size) +); + +DEFINE_EVENT(ubwcp_dmabuf_event, ubwcp_init_buffer_start, + + TP_PROTO(struct dma_buf *dbuf_addr), + + TP_ARGS(dbuf_addr) +); + +DEFINE_EVENT(ubwcp_dmabuf_event, ubwcp_init_buffer_end, + + TP_PROTO(struct dma_buf *dbuf_addr), + + TP_ARGS(dbuf_addr) +); + 
+DEFINE_EVENT(ubwcp_dmabuf_size_event, ubwcp_add_memory_start, + + TP_PROTO(struct dma_buf *dbuf_addr, size_t size), + + TP_ARGS(dbuf_addr, size) +); + +DEFINE_EVENT(ubwcp_dmabuf_size_event, ubwcp_add_memory_end, + + TP_PROTO(struct dma_buf *dbuf_addr, size_t size), + + TP_ARGS(dbuf_addr, size) +); + +DEFINE_EVENT(ubwcp_dmabuf_event, ubwcp_set_buf_attrs_start, + + TP_PROTO(struct dma_buf *dbuf_addr), + + TP_ARGS(dbuf_addr) +); + +DEFINE_EVENT(ubwcp_dmabuf_event, ubwcp_set_buf_attrs_end, + + TP_PROTO(struct dma_buf *dbuf_addr), + + TP_ARGS(dbuf_addr) +); + +DEFINE_EVENT(ubwcp_dmabuf_event, ubwcp_lock_start, + + TP_PROTO(struct dma_buf *dbuf_addr), + + TP_ARGS(dbuf_addr) +); + +DEFINE_EVENT(ubwcp_dmabuf_event, ubwcp_lock_end, + + TP_PROTO(struct dma_buf *dbuf_addr), + + TP_ARGS(dbuf_addr) +); + +DEFINE_EVENT(ubwcp_dmabuf_event, ubwcp_unlock_start, + + TP_PROTO(struct dma_buf *dbuf_addr), + + TP_ARGS(dbuf_addr) +); + +DEFINE_EVENT(ubwcp_dmabuf_event, ubwcp_unlock_end, + + TP_PROTO(struct dma_buf *dbuf_addr), + + TP_ARGS(dbuf_addr) +); + +DEFINE_EVENT(ubwcp_dmabuf_size_event, ubwcp_dma_sync_single_for_device_start, + + TP_PROTO(struct dma_buf *dbuf_addr, size_t size), + + TP_ARGS(dbuf_addr, size) +); + +DEFINE_EVENT(ubwcp_dmabuf_size_event, ubwcp_dma_sync_single_for_device_end, + + TP_PROTO(struct dma_buf *dbuf_addr, size_t size), + + TP_ARGS(dbuf_addr, size) +); + +DEFINE_EVENT(ubwcp_dmabuf_size_event, ubwcp_dma_sync_single_for_cpu_start, + + TP_PROTO(struct dma_buf *dbuf_addr, size_t size), + + TP_ARGS(dbuf_addr, size) +); + +DEFINE_EVENT(ubwcp_dmabuf_size_event, ubwcp_dma_sync_single_for_cpu_end, + + TP_PROTO(struct dma_buf *dbuf_addr, size_t size), + + TP_ARGS(dbuf_addr, size) +); + +DEFINE_EVENT(ubwcp_dmabuf_size_event, ubwcp_hw_flush_start, + + TP_PROTO(struct dma_buf *dbuf_addr, size_t size), + + TP_ARGS(dbuf_addr, size) +); + +DEFINE_EVENT(ubwcp_dmabuf_size_event, ubwcp_hw_flush_end, + + TP_PROTO(struct dma_buf *dbuf_addr, size_t size), + + TP_ARGS(dbuf_addr, 
size) +); + +DEFINE_EVENT(ubwcp_dmabuf_size_event, ubwcp_offline_and_remove_memory_start, + + TP_PROTO(struct dma_buf *dbuf_addr, size_t size), + + TP_ARGS(dbuf_addr, size) +); + +DEFINE_EVENT(ubwcp_dmabuf_size_event, ubwcp_offline_and_remove_memory_end, + + TP_PROTO(struct dma_buf *dbuf_addr, size_t size), + + TP_ARGS(dbuf_addr, size) +); + +DEFINE_EVENT(ubwcp_dmabuf_size_event, ubwcp_set_direct_map_range_uncached_start, + + TP_PROTO(struct dma_buf *dbuf_addr, size_t size), + + TP_ARGS(dbuf_addr, size) +); + +DEFINE_EVENT(ubwcp_dmabuf_size_event, ubwcp_set_direct_map_range_uncached_end, + + TP_PROTO(struct dma_buf *dbuf_addr, size_t size), + + TP_ARGS(dbuf_addr, size) +); + +DEFINE_EVENT(ubwcp_dmabuf_event, ubwcp_free_buffer_start, + + TP_PROTO(struct dma_buf *dbuf_addr), + + TP_ARGS(dbuf_addr) +); + +DEFINE_EVENT(ubwcp_dmabuf_event, ubwcp_free_buffer_end, + + TP_PROTO(struct dma_buf *dbuf_addr), + + TP_ARGS(dbuf_addr) +); + +#endif + +/* This part must be outside protection */ +#include From b5312c51294baecdd99fc760f18fa37f0077b2cd Mon Sep 17 00:00:00 2001 From: Liam Mark Date: Mon, 6 Feb 2023 10:39:42 -0800 Subject: [PATCH 16/35] ubwcp: Set ULA PA range size from DT Use the UBWC-P ula_range DT property to configure the UBWC-P ULA PA range size. Change-Id: I46579146904f76160c0f984d8e377e8d28a56dd7 Signed-off-by: Liam Mark --- ubwcp/ubwcp_main.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/ubwcp/ubwcp_main.c b/ubwcp/ubwcp_main.c index 2035090b4b..32c81d885f 100644 --- a/ubwcp/ubwcp_main.c +++ b/ubwcp/ubwcp_main.c @@ -2575,9 +2575,6 @@ static int qcom_ubwcp_probe(struct platform_device *pdev) } DBG("ubwcp: ula_range: size = 0x%lx", ubwcp->ula_pool_size); - /*TBD: remove later. 
reducing size for quick testing...*/ - ubwcp->ula_pool_size = 0x20000000; //500MB instead of 8GB - INIT_LIST_HEAD(&ubwcp->err_handler_list); mutex_init(&ubwcp->desc_lock); From 14742c1cfb80861f75c10e6ff37d428abaa5616e Mon Sep 17 00:00:00 2001 From: Liam Mark Date: Mon, 6 Feb 2023 16:22:44 -0800 Subject: [PATCH 17/35] ubwcp: optimize power and mem offline perf Optimize the UBWC-P driver for power by ensuring that UBWC-P is only powered up when there is a non-linear UBWC-P buffer allocated. Optimize the performance of the memory offlining by adding support for the CMO component to be interrupted by the allocation of a new UBWC-P buffer. Change-Id: Ib473c00b996782131799fd223eaf1ad7feca058b Signed-off-by: Liam Mark --- ubwcp/ubwcp_main.c | 365 ++++++++++++++++++++++++++++++-------------- ubwcp/ubwcp_trace.h | 97 ++++++------ 2 files changed, 304 insertions(+), 158 deletions(-) diff --git a/ubwcp/ubwcp_main.c b/ubwcp/ubwcp_main.c index 32c81d885f..9c0bb1b33f 100644 --- a/ubwcp/ubwcp_main.c +++ b/ubwcp/ubwcp_main.c @@ -66,6 +66,8 @@ MODULE_IMPORT_NS(DMA_BUF); #define META_DATA_SIZE_ALIGN 4096 #define PIXEL_DATA_SIZE_ALIGN 4096 +#define UBWCP_SYNC_GRANULE 0x4000000L /* 64 MB */ + struct ubwcp_desc { int idx; void *ptr; @@ -138,10 +140,11 @@ struct ubwcp_driver { u32 hw_ver_major; u32 hw_ver_minor; - /* keep track of all buffers. hash table index'ed using dma_buf ptr. - * 2**8 = 256 hash values + /* keep track of all potential buffers. + * hash table index'ed using dma_buf ptr. 
+ * 2**13 = 8192 hash values */ - DECLARE_HASHTABLE(buf_table, 8); + DECLARE_HASHTABLE(buf_table, 13); /* buffer descriptor */ void *buffer_desc_base; /* CPU address */ @@ -151,6 +154,9 @@ struct ubwcp_driver { struct ubwcp_image_format_info format_info[INFO_FORMAT_LIST_SIZE]; + atomic_t num_non_lin_buffers; + bool mem_online; + struct mutex desc_lock; /* allocate/free descriptors */ spinlock_t buf_table_lock; /* add/remove dma_buf into list of managed bufffers */ struct mutex mem_hotplug_lock; /* memory hotplug lock */ @@ -424,6 +430,186 @@ int ubwcp_get_hw_version(struct ubwcp_ioctl_hw_version *ver) } EXPORT_SYMBOL(ubwcp_get_hw_version); +static int add_ula_pa_memory(struct ubwcp_driver *ubwcp) +{ + int ret; + int nid; + + nid = memory_add_physaddr_to_nid(ubwcp->ula_pool_base); + DBG("calling add_memory()..."); + trace_ubwcp_add_memory_start(ubwcp->ula_pool_size); + ret = add_memory(nid, ubwcp->ula_pool_base, ubwcp->ula_pool_size, MHP_NONE); + trace_ubwcp_add_memory_end(ubwcp->ula_pool_size); + + if (ret) { + ERR("add_memory() failed st:0x%lx sz:0x%lx err: %d", + ubwcp->ula_pool_base, + ubwcp->ula_pool_size, + ret); + /* Fix to put driver in invalid state */ + } else { + DBG("add_memory() ula_pool_base:0x%llx, size:0x%zx, kernel addr:0x%p", + ubwcp->ula_pool_base, + ubwcp->ula_pool_size, + page_to_virt(pfn_to_page(PFN_DOWN(ubwcp->ula_pool_base)))); + } + + return ret; +} + +static int inc_num_non_lin_buffers(struct ubwcp_driver *ubwcp) +{ + int ret = 0; + + atomic_inc(&ubwcp->num_non_lin_buffers); + mutex_lock(&ubwcp->mem_hotplug_lock); + if (!ubwcp->mem_online) { + if (atomic_read(&ubwcp->num_non_lin_buffers) == 0) { + ret = -EINVAL; + ERR("Bad state: num_non_lin_buffers should not be 0"); + /* Fix to put driver in invalid state */ + goto err_power_on; + } + + ret = ubwcp_power(ubwcp, true); + if (ret) + goto err_power_on; + + ret = add_ula_pa_memory(ubwcp); + if (ret) + goto err_add_memory; + + ubwcp->mem_online = true; + } + 
mutex_unlock(&ubwcp->mem_hotplug_lock); + return 0; + +err_add_memory: + ubwcp_power(ubwcp, false); +err_power_on: + atomic_dec(&ubwcp->num_non_lin_buffers); + mutex_unlock(&ubwcp->mem_hotplug_lock); + + return ret; +} + +static int dec_num_non_lin_buffers(struct ubwcp_driver *ubwcp) +{ + int ret = 0; + + atomic_dec(&ubwcp->num_non_lin_buffers); + mutex_lock(&ubwcp->mem_hotplug_lock); + + /* If this is the last buffer being freed, power off ubwcp */ + if (atomic_read(&ubwcp->num_non_lin_buffers) == 0) { + unsigned long sync_remain = 0; + unsigned long sync_offset = 0; + unsigned long sync_size = 0; + unsigned long sync_granule = UBWCP_SYNC_GRANULE; + + DBG("last buffer: ~~~~~~~~~~~"); + if (!ubwcp->mem_online) { + ret = -EINVAL; + ERR("Bad state: mem_online should not be false"); + /* Fix to put driver in invalid state */ + goto err_remove_mem; + } + + DBG("set_direct_map_range_uncached() for ULA PA pool st:0x%lx num pages:%lu", + ubwcp->ula_pool_base, ubwcp->ula_pool_size >> PAGE_SHIFT); + trace_ubwcp_set_direct_map_range_uncached_start(ubwcp->ula_pool_size); + ret = set_direct_map_range_uncached((unsigned long)phys_to_virt( + ubwcp->ula_pool_base), ubwcp->ula_pool_size >> PAGE_SHIFT); + trace_ubwcp_set_direct_map_range_uncached_end(ubwcp->ula_pool_size); + if (ret) { + ERR("set_direct_map_range_uncached failed st:0x%lx num pages:%lu err: %d", + ubwcp->ula_pool_base, + ubwcp->ula_pool_size >> PAGE_SHIFT, ret); + goto err_remove_mem; + } else { + DBG("DONE: calling set_direct_map_range_uncached() for ULA PA pool"); + } + + DBG("Calling dma_sync_single_for_cpu() for ULA PA pool"); + trace_ubwcp_offline_sync_start(ubwcp->ula_pool_size); + + sync_remain = ubwcp->ula_pool_size; + sync_offset = 0; + while (sync_remain > 0) { + if (atomic_read(&ubwcp->num_non_lin_buffers) > 0) { + + trace_ubwcp_offline_sync_end(ubwcp->ula_pool_size); + DBG("Cancel memory offlining"); + + DBG("Calling offline_and_remove_memory() for ULA PA pool"); + 
trace_ubwcp_offline_and_remove_memory_start(ubwcp->ula_pool_size); + ret = offline_and_remove_memory(ubwcp->ula_pool_base, + ubwcp->ula_pool_size); + trace_ubwcp_offline_and_remove_memory_end(ubwcp->ula_pool_size); + if (ret) { + ERR("remove memory failed st:0x%lx sz:0x%lx err: %d", + ubwcp->ula_pool_base, + ubwcp->ula_pool_size, ret); + goto err_remove_mem; + } else { + DBG("DONE: calling remove memory for ULA PA pool"); + } + + ret = add_ula_pa_memory(ubwcp); + if (ret) { + ERR("Bad state: failed to add back memory"); + /* Fix to put driver in invalid state */ + ubwcp->mem_online = false; + } + mutex_unlock(&ubwcp->mem_hotplug_lock); + return ret; + } + + if (sync_granule > sync_remain) { + sync_size = sync_remain; + sync_remain = 0; + } else { + sync_size = sync_granule; + sync_remain -= sync_granule; + } + + DBG("Partial sync offset:0x%lx size:0x%lx", sync_offset, sync_size); + trace_ubwcp_dma_sync_single_for_cpu_start(sync_size); + dma_sync_single_for_cpu(ubwcp->dev, ubwcp->ula_pool_base + sync_offset, + sync_size, DMA_BIDIRECTIONAL); + trace_ubwcp_dma_sync_single_for_cpu_end(sync_size); + sync_offset += sync_size; + } + trace_ubwcp_offline_sync_end(ubwcp->ula_pool_size); + + DBG("Calling offline_and_remove_memory() for ULA PA pool"); + trace_ubwcp_offline_and_remove_memory_start(ubwcp->ula_pool_size); + ret = offline_and_remove_memory(ubwcp->ula_pool_base, ubwcp->ula_pool_size); + trace_ubwcp_offline_and_remove_memory_end(ubwcp->ula_pool_size); + if (ret) { + ERR("offline_and_remove_memory failed st:0x%lx sz:0x%lx err: %d", + ubwcp->ula_pool_base, + ubwcp->ula_pool_size, ret); + /* Fix to put driver in invalid state */ + goto err_remove_mem; + } else { + DBG("DONE: calling offline_and_remove_memory() for ULA PA pool"); + } + DBG("Calling power OFF ..."); + ubwcp_power(ubwcp, false); + ubwcp->mem_online = false; + } + mutex_unlock(&ubwcp->mem_hotplug_lock); + return 0; + +err_remove_mem: + atomic_inc(&ubwcp->num_non_lin_buffers); + 
mutex_unlock(&ubwcp->mem_hotplug_lock); + + DBG("returning error: %d", ret); + return ret; +} + /** * * Initialize ubwcp buffer for the given dma_buf. This @@ -436,12 +622,9 @@ EXPORT_SYMBOL(ubwcp_get_hw_version); */ static int ubwcp_init_buffer(struct dma_buf *dmabuf) { - int ret = 0; - int nid; struct ubwcp_buf *buf; struct ubwcp_driver *ubwcp = ubwcp_get_driver(); unsigned long flags; - bool table_empty; FENTRY(); trace_ubwcp_init_buffer_start(dmabuf); @@ -473,50 +656,14 @@ static int ubwcp_init_buffer(struct dma_buf *dmabuf) mutex_init(&buf->lock); buf->dma_buf = dmabuf; buf->ubwcp = ubwcp; + buf->buf_attr.image_format = UBWCP_LINEAR; - mutex_lock(&ubwcp->mem_hotplug_lock); - spin_lock_irqsave(&ubwcp->buf_table_lock, flags); - table_empty = hash_empty(ubwcp->buf_table); - spin_unlock_irqrestore(&ubwcp->buf_table_lock, flags); - if (table_empty) { - ret = ubwcp_power(ubwcp, true); - if (ret) - goto err_power_on; - - nid = memory_add_physaddr_to_nid(ubwcp->ula_pool_base); - DBG("calling add_memory()..."); - trace_ubwcp_add_memory_start(dmabuf, ubwcp->ula_pool_size); - ret = add_memory(nid, ubwcp->ula_pool_base, ubwcp->ula_pool_size, MHP_NONE); - trace_ubwcp_add_memory_end(dmabuf, ubwcp->ula_pool_size); - if (ret) { - ERR("add_memory() failed st:0x%lx sz:0x%lx err: %d", - ubwcp->ula_pool_base, - ubwcp->ula_pool_size, - ret); - goto err_add_memory; - } else { - DBG("add_memory() ula_pool_base:0x%llx, size:0x%zx, kernel addr:0x%p", - ubwcp->ula_pool_base, - ubwcp->ula_pool_size, - page_to_virt(pfn_to_page(PFN_DOWN(ubwcp->ula_pool_base)))); - } - } spin_lock_irqsave(&ubwcp->buf_table_lock, flags); hash_add(ubwcp->buf_table, &buf->hnode, (u64)buf->dma_buf); spin_unlock_irqrestore(&ubwcp->buf_table_lock, flags); - mutex_unlock(&ubwcp->mem_hotplug_lock); - trace_ubwcp_init_buffer_end(dmabuf); - return ret; -err_add_memory: - ubwcp_power(ubwcp, false); -err_power_on: - mutex_unlock(&ubwcp->mem_hotplug_lock); - kfree(buf); - if (!ret) - ret = -1; 
trace_ubwcp_init_buffer_end(dmabuf); - return ret; + return 0; } static void dump_attributes(struct ubwcp_buffer_attrs *attr) @@ -1305,6 +1452,7 @@ static void reset_buf_attrs(struct ubwcp_buf *buf) /* reset ubwcp params */ memset(mmdata, 0, sizeof(*mmdata)); buf->buf_attr_set = false; + buf->buf_attr.image_format = UBWCP_LINEAR; } static void print_mmdata_desc(struct ubwcp_hw_meta_metadata *mmdata) @@ -1364,6 +1512,7 @@ int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) u32 width_b; u32 height_b; enum ubwcp_std_image_format std_image_format; + bool is_non_lin_buf; FENTRY(); trace_ubwcp_set_buf_attrs_start(dmabuf); @@ -1392,11 +1541,12 @@ int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) if (buf->locked) { ERR("Cannot set attr when buffer is locked"); ret = -EBUSY; - goto err; + goto unlock; } ubwcp = buf->ubwcp; mmdata = &buf->mmdata; + is_non_lin_buf = (buf->buf_attr.image_format != UBWCP_LINEAR); //TBD: now that we have single exit point for all errors, //we can limit this call to error only? @@ -1406,7 +1556,7 @@ int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) ret = ubwcp->mmap_config_fptr(buf->dma_buf, true, 0, 0); if (ret) { ERR("dma_buf_mmap_config() failed: %d", ret); - goto err; + goto unlock; } if (!ubwcp_buf_attrs_valid(attr)) { @@ -1418,6 +1568,8 @@ int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) if (attr->image_format == UBWCP_LINEAR) { DBG_BUF_ATTR("Linear format requested"); + + /* linear format request with permanent range xlation doesn't * make sense. need to define behavior if this happens. * note: with perm set, desc is allocated to this buffer. 
@@ -1427,9 +1579,17 @@ int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) if (buf->buf_attr_set) reset_buf_attrs(buf); + if (is_non_lin_buf) { + /* + * Changing buffer from ubwc to linear so decrement + * number of ubwc buffers + */ + ret = dec_num_non_lin_buffers(ubwcp); + } + mutex_unlock(&buf->lock); trace_ubwcp_set_buf_attrs_end(dmabuf); - return 0; + return ret; } std_image_format = to_std_format(attr->image_format); @@ -1572,6 +1732,17 @@ int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) mmdata->width_height = width_b << 16 | attr->height; print_mmdata_desc(mmdata); + if (!is_non_lin_buf) { + /* + * Changing buffer from linear to ubwc so increment + * number of ubwc buffers + */ + ret = inc_num_non_lin_buffers(ubwcp); + } + if (ret) { + ERR("inc_num_non_lin_buffers failed: %d", ret); + goto err; + } buf->buf_attr = *attr; buf->buf_attr_set = true; @@ -1582,6 +1753,14 @@ int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) err: reset_buf_attrs(buf); + if (is_non_lin_buf) { + /* + * Changing buffer from ubwc to linear so decrement + * number of ubwc buffers + */ + dec_num_non_lin_buffers(ubwcp); + } +unlock: mutex_unlock(&buf->lock); if (!ret) ret = -1; @@ -1747,17 +1926,17 @@ static int ubwcp_lock(struct dma_buf *dmabuf, enum dma_data_direction dir) * we force completion of that and then we also cpu invalidate which * will get rid of that line. 
*/ - trace_ubwcp_hw_flush_start(dmabuf, buf->ula_size); + trace_ubwcp_hw_flush_start(buf->ula_size); ubwcp_flush(ubwcp); - trace_ubwcp_hw_flush_end(dmabuf, buf->ula_size); + trace_ubwcp_hw_flush_end(buf->ula_size); /* Flush/invalidate ULA PA from CPU caches * TBD: if (dir == READ or BIDIRECTION) //NOT for write * -- Confirm with Chris if this can be skipped for write */ - trace_ubwcp_dma_sync_single_for_cpu_start(dmabuf, buf->ula_size); + trace_ubwcp_dma_sync_single_for_cpu_start(buf->ula_size); dma_sync_single_for_cpu(ubwcp->dev, buf->ula_pa, buf->ula_size, dir); - trace_ubwcp_dma_sync_single_for_cpu_end(dmabuf, buf->ula_size); + trace_ubwcp_dma_sync_single_for_cpu_end(buf->ula_size); buf->lock_dir = dir; buf->locked = true; } else { @@ -1805,18 +1984,18 @@ static int unlock_internal(struct ubwcp_buf *buf, enum dma_data_direction dir, b /* Flush/invalidate ULA PA from CPU caches */ //TBD: if (dir == WRITE or BIDIRECTION) - trace_ubwcp_dma_sync_single_for_device_start(buf->dma_buf, buf->ula_size); + trace_ubwcp_dma_sync_single_for_device_start(buf->ula_size); dma_sync_single_for_device(ubwcp->dev, buf->ula_pa, buf->ula_size, dir); - trace_ubwcp_dma_sync_single_for_device_end(buf->dma_buf, buf->ula_size); + trace_ubwcp_dma_sync_single_for_device_end(buf->ula_size); /* disable range check with ubwcp flush */ DBG("disabling range check"); //TBD: could combine these 2 locks into a single lock to make it simpler mutex_lock(&ubwcp->ubwcp_flush_lock); mutex_lock(&ubwcp->hw_range_ck_lock); - trace_ubwcp_hw_flush_start(buf->dma_buf, buf->ula_size); + trace_ubwcp_hw_flush_start(buf->ula_size); ret = ubwcp_hw_disable_range_check_with_flush(ubwcp->base, buf->desc->idx); - trace_ubwcp_hw_flush_end(buf->dma_buf, buf->ula_size); + trace_ubwcp_hw_flush_end(buf->ula_size); if (ret) ERR("disable_range_check_with_flush() failed: %d", ret); mutex_unlock(&ubwcp->hw_range_ck_lock); @@ -1988,8 +2167,8 @@ static int ubwcp_free_buffer(struct dma_buf *dmabuf) int ret = 0; struct ubwcp_buf 
*buf; struct ubwcp_driver *ubwcp; - bool table_empty; unsigned long flags; + bool is_non_lin_buf; FENTRY(); trace_ubwcp_free_buffer_start(dmabuf); @@ -2009,6 +2188,7 @@ static int ubwcp_free_buffer(struct dma_buf *dmabuf) mutex_lock(&buf->lock); ubwcp = buf->ubwcp; + is_non_lin_buf = (buf->buf_attr.image_format != UBWCP_LINEAR); if (buf->locked) { DBG("free() called without unlock. unlock()'ing first..."); @@ -2027,71 +2207,17 @@ static int ubwcp_free_buffer(struct dma_buf *dmabuf) if (buf->buf_attr_set) reset_buf_attrs(buf); - mutex_lock(&ubwcp->mem_hotplug_lock); spin_lock_irqsave(&ubwcp->buf_table_lock, flags); hash_del(&buf->hnode); - table_empty = hash_empty(ubwcp->buf_table); spin_unlock_irqrestore(&ubwcp->buf_table_lock, flags); kfree(buf); - /* If this is the last buffer being freed, power off ubwcp */ - if (table_empty) { - DBG("last buffer: ~~~~~~~~~~~"); - /* TBD: If everything is working fine, ubwcp_flush() should not - * be needed here. Each buffer free logic should be taking - * care of flush. Just a note for now. Might need to add the - * flush here for debug purpose. 
- */ + if (is_non_lin_buf) + dec_num_non_lin_buffers(ubwcp); - DBG("set_direct_map_range_uncached() for ULA PA pool st:0x%lx num pages:%lu", - ubwcp->ula_pool_base, ubwcp->ula_pool_size >> PAGE_SHIFT); - trace_ubwcp_set_direct_map_range_uncached_start(dmabuf, ubwcp->ula_pool_size); - ret = set_direct_map_range_uncached((unsigned long)phys_to_virt( - ubwcp->ula_pool_base), ubwcp->ula_pool_size >> PAGE_SHIFT); - trace_ubwcp_set_direct_map_range_uncached_end(dmabuf, ubwcp->ula_pool_size); - if (ret) { - ERR("set_direct_map_range_uncached failed st:0x%lx num pages:%lu err: %d", - ubwcp->ula_pool_base, - ubwcp->ula_pool_size >> PAGE_SHIFT, ret); - goto err_remove_mem; - } else { - DBG("DONE: calling set_direct_map_range_uncached() for ULA PA pool"); - } - - DBG("Calling dma_sync_single_for_cpu() for ULA PA pool"); - trace_ubwcp_dma_sync_single_for_cpu_start(dmabuf, ubwcp->ula_pool_size); - dma_sync_single_for_cpu(ubwcp->dev, ubwcp->ula_pool_base, ubwcp->ula_pool_size, - DMA_BIDIRECTIONAL); - trace_ubwcp_dma_sync_single_for_cpu_end(dmabuf, ubwcp->ula_pool_size); - - DBG("Calling offline_and_remove_memory() for ULA PA pool"); - trace_ubwcp_offline_and_remove_memory_start(dmabuf, ubwcp->ula_pool_size); - ret = offline_and_remove_memory(ubwcp->ula_pool_base, - ubwcp->ula_pool_size); - trace_ubwcp_offline_and_remove_memory_end(dmabuf, ubwcp->ula_pool_size); - if (ret) { - ERR("offline_and_remove_memory failed st:0x%lx sz:0x%lx err: %d", - ubwcp->ula_pool_base, - ubwcp->ula_pool_size, ret); - goto err_remove_mem; - } else { - DBG("DONE: calling offline_and_remove_memory() for ULA PA pool"); - } - DBG("Calling power OFF ..."); - ubwcp_power(ubwcp, false); - } - mutex_unlock(&ubwcp->mem_hotplug_lock); trace_ubwcp_free_buffer_end(dmabuf); - return ret; - -err_remove_mem: - mutex_unlock(&ubwcp->mem_hotplug_lock); - if (!ret) - ret = -1; - DBG("returning error: %d", ret); - trace_ubwcp_free_buffer_end(dmabuf); - return ret; + return 0; } @@ -2346,8 +2472,14 @@ static struct 
dma_buf *get_dma_buf_from_iova(unsigned long addr) spin_lock_irqsave(&ubwcp->buf_table_lock, flags); hash_for_each(ubwcp->buf_table, i, buf, hnode) { - unsigned long iova_base = sg_dma_address(buf->sgt->sgl); - unsigned int iova_size = sg_dma_len(buf->sgt->sgl); + unsigned long iova_base; + unsigned int iova_size; + + if (!buf->sgt) + continue; + + iova_base = sg_dma_address(buf->sgt->sgl); + iova_size = sg_dma_len(buf->sgt->sgl); if (iova_base <= addr && addr < iova_base + iova_size) { ret_buf = buf->dma_buf; @@ -2577,6 +2709,9 @@ static int qcom_ubwcp_probe(struct platform_device *pdev) INIT_LIST_HEAD(&ubwcp->err_handler_list); + atomic_set(&ubwcp->num_non_lin_buffers, 0); + ubwcp->mem_online = false; + mutex_init(&ubwcp->desc_lock); spin_lock_init(&ubwcp->buf_table_lock); mutex_init(&ubwcp->mem_hotplug_lock); diff --git a/ubwcp/ubwcp_trace.h b/ubwcp/ubwcp_trace.h index 6c6eb146c4..3e9da11a58 100644 --- a/ubwcp/ubwcp_trace.h +++ b/ubwcp/ubwcp_trace.h @@ -72,24 +72,21 @@ DECLARE_EVENT_CLASS(ubwcp_dmabuf_event, __entry->dbuf_addr) ); -DECLARE_EVENT_CLASS(ubwcp_dmabuf_size_event, +DECLARE_EVENT_CLASS(ubwcp_size_event, - TP_PROTO(struct dma_buf *dbuf_addr, size_t size), + TP_PROTO(size_t size), - TP_ARGS(dbuf_addr, size), + TP_ARGS(size), TP_STRUCT__entry( - __field(struct dma_buf *, dbuf_addr) __field(size_t, size) ), TP_fast_assign( - __entry->dbuf_addr = dbuf_addr; __entry->size = size; ), - TP_printk("dma-buf:0x%lx size:%zu", - __entry->dbuf_addr, __entry->size) + TP_printk("size:%zu", __entry->size) ); DEFINE_EVENT(ubwcp_dmabuf_event, ubwcp_init_buffer_start, @@ -106,18 +103,18 @@ DEFINE_EVENT(ubwcp_dmabuf_event, ubwcp_init_buffer_end, TP_ARGS(dbuf_addr) ); -DEFINE_EVENT(ubwcp_dmabuf_size_event, ubwcp_add_memory_start, +DEFINE_EVENT(ubwcp_size_event, ubwcp_add_memory_start, - TP_PROTO(struct dma_buf *dbuf_addr, size_t size), + TP_PROTO(size_t size), - TP_ARGS(dbuf_addr, size) + TP_ARGS(size) ); -DEFINE_EVENT(ubwcp_dmabuf_size_event, ubwcp_add_memory_end, 
+DEFINE_EVENT(ubwcp_size_event, ubwcp_add_memory_end, - TP_PROTO(struct dma_buf *dbuf_addr, size_t size), + TP_PROTO(size_t size), - TP_ARGS(dbuf_addr, size) + TP_ARGS(size) ); DEFINE_EVENT(ubwcp_dmabuf_event, ubwcp_set_buf_attrs_start, @@ -162,74 +159,88 @@ DEFINE_EVENT(ubwcp_dmabuf_event, ubwcp_unlock_end, TP_ARGS(dbuf_addr) ); -DEFINE_EVENT(ubwcp_dmabuf_size_event, ubwcp_dma_sync_single_for_device_start, +DEFINE_EVENT(ubwcp_size_event, ubwcp_offline_sync_start, - TP_PROTO(struct dma_buf *dbuf_addr, size_t size), + TP_PROTO(size_t size), - TP_ARGS(dbuf_addr, size) + TP_ARGS(size) ); -DEFINE_EVENT(ubwcp_dmabuf_size_event, ubwcp_dma_sync_single_for_device_end, +DEFINE_EVENT(ubwcp_size_event, ubwcp_offline_sync_end, - TP_PROTO(struct dma_buf *dbuf_addr, size_t size), + TP_PROTO(size_t size), - TP_ARGS(dbuf_addr, size) + TP_ARGS(size) ); -DEFINE_EVENT(ubwcp_dmabuf_size_event, ubwcp_dma_sync_single_for_cpu_start, +DEFINE_EVENT(ubwcp_size_event, ubwcp_dma_sync_single_for_device_start, - TP_PROTO(struct dma_buf *dbuf_addr, size_t size), + TP_PROTO(size_t size), - TP_ARGS(dbuf_addr, size) + TP_ARGS(size) ); -DEFINE_EVENT(ubwcp_dmabuf_size_event, ubwcp_dma_sync_single_for_cpu_end, +DEFINE_EVENT(ubwcp_size_event, ubwcp_dma_sync_single_for_device_end, - TP_PROTO(struct dma_buf *dbuf_addr, size_t size), + TP_PROTO(size_t size), - TP_ARGS(dbuf_addr, size) + TP_ARGS(size) ); -DEFINE_EVENT(ubwcp_dmabuf_size_event, ubwcp_hw_flush_start, +DEFINE_EVENT(ubwcp_size_event, ubwcp_dma_sync_single_for_cpu_start, - TP_PROTO(struct dma_buf *dbuf_addr, size_t size), + TP_PROTO(size_t size), - TP_ARGS(dbuf_addr, size) + TP_ARGS(size) ); -DEFINE_EVENT(ubwcp_dmabuf_size_event, ubwcp_hw_flush_end, +DEFINE_EVENT(ubwcp_size_event, ubwcp_dma_sync_single_for_cpu_end, - TP_PROTO(struct dma_buf *dbuf_addr, size_t size), + TP_PROTO(size_t size), - TP_ARGS(dbuf_addr, size) + TP_ARGS(size) ); -DEFINE_EVENT(ubwcp_dmabuf_size_event, ubwcp_offline_and_remove_memory_start, +DEFINE_EVENT(ubwcp_size_event, 
ubwcp_hw_flush_start, - TP_PROTO(struct dma_buf *dbuf_addr, size_t size), + TP_PROTO(size_t size), - TP_ARGS(dbuf_addr, size) + TP_ARGS(size) ); -DEFINE_EVENT(ubwcp_dmabuf_size_event, ubwcp_offline_and_remove_memory_end, +DEFINE_EVENT(ubwcp_size_event, ubwcp_hw_flush_end, - TP_PROTO(struct dma_buf *dbuf_addr, size_t size), + TP_PROTO(size_t size), - TP_ARGS(dbuf_addr, size) + TP_ARGS(size) ); -DEFINE_EVENT(ubwcp_dmabuf_size_event, ubwcp_set_direct_map_range_uncached_start, +DEFINE_EVENT(ubwcp_size_event, ubwcp_offline_and_remove_memory_start, - TP_PROTO(struct dma_buf *dbuf_addr, size_t size), + TP_PROTO(size_t size), - TP_ARGS(dbuf_addr, size) + TP_ARGS(size) ); -DEFINE_EVENT(ubwcp_dmabuf_size_event, ubwcp_set_direct_map_range_uncached_end, +DEFINE_EVENT(ubwcp_size_event, ubwcp_offline_and_remove_memory_end, - TP_PROTO(struct dma_buf *dbuf_addr, size_t size), + TP_PROTO(size_t size), - TP_ARGS(dbuf_addr, size) + TP_ARGS(size) +); + +DEFINE_EVENT(ubwcp_size_event, ubwcp_set_direct_map_range_uncached_start, + + TP_PROTO(size_t size), + + TP_ARGS(size) +); + +DEFINE_EVENT(ubwcp_size_event, ubwcp_set_direct_map_range_uncached_end, + + TP_PROTO(size_t size), + + TP_ARGS(size) ); DEFINE_EVENT(ubwcp_dmabuf_event, ubwcp_free_buffer_start, From 7049c53fc24457b8d6a97b5785bd075f2fa74592 Mon Sep 17 00:00:00 2001 From: Amol Jadi Date: Tue, 7 Feb 2023 02:43:01 -0800 Subject: [PATCH 18/35] ubwcp: ioctl for stride alignment and bytes per pixels Implements stride alignment and stride validate IOCTL. Additional checks on stride value. Fixes spell error in ioctl interface. 
Change-Id: Ic4a41a12e2ffa8d45fd71938133accd069dff863 Signed-off-by: Amol Jadi --- ubwcp/include/kernel/ubwcp.h | 4 +- ubwcp/include/uapi/ubwcp_ioctl.h | 35 ++++- ubwcp/ubwcp_main.c | 253 +++++++++++++++++++++++-------- 3 files changed, 227 insertions(+), 65 deletions(-) diff --git a/ubwcp/include/kernel/ubwcp.h b/ubwcp/include/kernel/ubwcp.h index 7fe7018dc4..23c683f8e5 100644 --- a/ubwcp/include/kernel/ubwcp.h +++ b/ubwcp/include/kernel/ubwcp.h @@ -99,7 +99,7 @@ struct ubwcp_smmu_fault_err_info { int iommu_fault_flags; }; -struct unwcp_err_info { +struct ubwcp_err_info { enum ubwcp_error err_code; union { struct ubwcp_enc_err_info enc_err; @@ -109,7 +109,7 @@ struct unwcp_err_info { }; }; -typedef void (*ubwcp_error_handler_t)(struct unwcp_err_info *err, void *data); +typedef void (*ubwcp_error_handler_t)(struct ubwcp_err_info *err, void *data); /* * Register an error handler diff --git a/ubwcp/include/uapi/ubwcp_ioctl.h b/ubwcp/include/uapi/ubwcp_ioctl.h index 7a13ab54c5..4b9bc7338e 100644 --- a/ubwcp/include/uapi/ubwcp_ioctl.h +++ b/ubwcp/include/uapi/ubwcp_ioctl.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef __UBWCP_IOCTL_H_ @@ -11,6 +11,8 @@ #define UBWCP_IOCTL_SET_BUF_ATTR _IOW('U', 1, struct ubwcp_ioctl_buffer_attrs) #define UBWCP_IOCTL_GET_HW_VER _IOR('U', 2, struct ubwcp_ioctl_hw_version) +#define UBWCP_IOCTL_GET_STRIDE_ALIGN _IOWR('U', 3, struct ubwcp_ioctl_stride_align) +#define UBWCP_IOCTL_VALIDATE_STRIDE _IOWR('U', 4, struct ubwcp_ioctl_validate_stride) enum ubwcp_image_format { @@ -115,4 +117,35 @@ struct ubwcp_ioctl_hw_version { __u32 minor; }; +/** + * Stride alignment for given format + * @image_format: image format + * @stride_align: stride alignment + * @unused: must be set to 0 + * IOCTL will fail for linear image format + */ +struct ubwcp_ioctl_stride_align { + __u16 image_format; + __u16 stride_align; + __u32 unused; +}; + +/** + * validate stride + * @image_format: image format + * @width: image width in pixels + * @stride: image stride in bytes + * @valid: returns 0 (not valid), 1 (valid) + * @unusedX: must be set to 0 + * IOCTL will fail for linear image format + */ +struct ubwcp_ioctl_validate_stride { + __u16 image_format; + __u32 width; + __u32 stride; + __u16 valid; + __u16 unused1; + __u16 unused2; +}; + #endif /* __UBWCP_IOCTL_H_ */ diff --git a/ubwcp/ubwcp_main.c b/ubwcp/ubwcp_main.c index 9c0bb1b33f..8e3881d89d 100644 --- a/ubwcp/ubwcp_main.c +++ b/ubwcp/ubwcp_main.c @@ -211,19 +211,19 @@ static void image_format_init(struct ubwcp_driver *ubwcp) {1, {{4, 1, {16, 4}, {64, 16}}}}; ubwcp->format_info[NV12] = (struct ubwcp_image_format_info) {2, {{1, 1, {32, 8}, {128, 32}}, - {2, 1, {16, 8}, { 64, 32}}}}; + {2, 1, {16, 8}, { 64, 32}}}}; ubwcp->format_info[NV124R] = (struct ubwcp_image_format_info) {2, {{1, 1, {64, 4}, {256, 16}}, - {2, 1, {32, 4}, {128, 16}}}}; + {2, 1, {32, 4}, {128, 16}}}}; ubwcp->format_info[P010] = (struct ubwcp_image_format_info) {2, {{2, 1, {32, 4}, {128, 16}}, - {4, 1, {16, 4}, { 64, 16}}}}; + {4, 1, {16, 4}, { 64, 16}}}}; ubwcp->format_info[TP10] = (struct ubwcp_image_format_info) {2, {{4, 3, {48, 4}, {192, 
16}}, - {8, 3, {24, 4}, { 96, 16}}}}; + {8, 3, {24, 4}, { 96, 16}}}}; ubwcp->format_info[P016] = (struct ubwcp_image_format_info) {2, {{2, 1, {32, 4}, {128, 16}}, - {4, 1, {16, 4}, { 64, 16}}}}; + {4, 1, {16, 4}, { 64, 16}}}}; } static void ubwcp_buf_desc_list_init(struct ubwcp_driver *ubwcp) @@ -686,12 +686,111 @@ static void dump_attributes(struct ubwcp_buffer_attrs *attr) DBG_BUF_ATTR(""); } -/* validate buffer attributes */ -static bool ubwcp_buf_attrs_valid(struct ubwcp_buffer_attrs *attr) +static enum ubwcp_std_image_format to_std_format(u16 ioctl_image_format) { - bool valid_format; + switch (ioctl_image_format) { + case UBWCP_RGBA8888: + return RGBA; + case UBWCP_NV12: + case UBWCP_NV12_Y: + case UBWCP_NV12_UV: + return NV12; + case UBWCP_NV124R: + case UBWCP_NV124R_Y: + case UBWCP_NV124R_UV: + return NV124R; + case UBWCP_TP10: + case UBWCP_TP10_Y: + case UBWCP_TP10_UV: + return TP10; + case UBWCP_P010: + case UBWCP_P010_Y: + case UBWCP_P010_UV: + return P010; + case UBWCP_P016: + case UBWCP_P016_Y: + case UBWCP_P016_UV: + return P016; + default: + WARN(1, "Fix this!!!"); + return STD_IMAGE_FORMAT_INVALID; + } +} - switch (attr->image_format) { +static int get_stride_alignment(enum ubwcp_std_image_format format, u16 *align) +{ + switch (format) { + case TP10: + *align = 64; + return 0; + case NV12: + *align = 128; + return 0; + case RGBA: + case NV124R: + case P010: + case P016: + *align = 256; + return 0; + default: + return -1; + } +} + +/* returns stride of compressed image */ +static u32 get_compressed_stride(struct ubwcp_driver *ubwcp, + enum ubwcp_std_image_format format, u32 width) +{ + struct ubwcp_plane_info p_info; + u16 macro_tile_width_p; + u16 pixel_bytes; + u16 per_pixel; + + p_info = ubwcp->format_info[format].p_info[0]; + macro_tile_width_p = p_info.macrotilesize_p.width; + pixel_bytes = p_info.pixel_bytes; + per_pixel = p_info.per_pixel; + + return UBWCP_ALIGN(width, macro_tile_width_p)*pixel_bytes/per_pixel; +} + +/* check if linear stride 
conforms to hw limitations + * always returns false for linear image + */ +static bool stride_is_valid(struct ubwcp_driver *ubwcp, + u16 ioctl_img_fmt, u32 width, u32 lin_stride) +{ + u32 compressed_stride; + enum ubwcp_std_image_format format = to_std_format(ioctl_img_fmt); + + if (format == STD_IMAGE_FORMAT_INVALID) + return false; + + if ((lin_stride < width) || (lin_stride > 64*1024)) { + ERR("stride is not valid (width <= stride <= 64K): %d", lin_stride); + return false; + } + + if (format == TP10) { + if(!IS_ALIGNED(lin_stride, 64)) { + ERR("stride must be aligned to 64: %d", lin_stride); + return false; + } + } else { + compressed_stride = get_compressed_stride(ubwcp, format, width); + if (lin_stride != compressed_stride) { + ERR("linear stride: %d must be same as compressed stride: %d", + lin_stride, compressed_stride); + return false; + } + } + + return true; +} + +static bool ioctl_format_is_valid(u16 ioctl_image_format) +{ + switch (ioctl_image_format) { case UBWCP_LINEAR: case UBWCP_RGBA8888: case UBWCP_NV12: @@ -709,13 +808,16 @@ static bool ubwcp_buf_attrs_valid(struct ubwcp_buffer_attrs *attr) case UBWCP_P016: case UBWCP_P016_Y: case UBWCP_P016_UV: - valid_format = true; - break; + return true; default: - valid_format = false; + return false; } +} - if (!valid_format) { +/* validate buffer attributes */ +static bool ubwcp_buf_attrs_valid(struct ubwcp_driver *ubwcp, struct ubwcp_buffer_attrs *attr) +{ + if (!ioctl_format_is_valid(attr->image_format)) { ERR("invalid image format: %d", attr->image_format); goto err; } @@ -749,19 +851,14 @@ static bool ubwcp_buf_attrs_valid(struct ubwcp_buffer_attrs *attr) goto err; } + if (attr->image_format != UBWCP_LINEAR) + if(!stride_is_valid(ubwcp, attr->image_format, attr->width, attr->stride)) { + ERR("stride is invalid: %d", attr->stride); + goto err; + } - /* TBD: what's the upper limit for stride? 8K is likely too high. 
*/ - if (!IS_ALIGNED(attr->stride, 64) || - (attr->stride < attr->width) || - (attr->stride > 4*8192)) { - ERR("stride is not valid (aligned to 64 and <= 8192): %d", - attr->stride); - goto err; - } - - /* TBD: currently assume height + 10. Replace 10 with right num from camera. */ if ((attr->scanlines < attr->height) || - (attr->scanlines > attr->height + 10)) { + (attr->scanlines > attr->height + 32*1024)) { ERR("scanlines is not valid - height: %d scanlines: %d", attr->height, attr->scanlines); goto err; @@ -1054,37 +1151,6 @@ int planes_in_format(enum ubwcp_std_image_format format) return 2; } -enum ubwcp_std_image_format to_std_format(u16 ioctl_image_format) -{ - switch (ioctl_image_format) { - case UBWCP_RGBA8888: - return RGBA; - case UBWCP_NV12: - case UBWCP_NV12_Y: - case UBWCP_NV12_UV: - return NV12; - case UBWCP_NV124R: - case UBWCP_NV124R_Y: - case UBWCP_NV124R_UV: - return NV124R; - case UBWCP_TP10: - case UBWCP_TP10_Y: - case UBWCP_TP10_UV: - return TP10; - case UBWCP_P010: - case UBWCP_P010_Y: - case UBWCP_P010_UV: - return P010; - case UBWCP_P016: - case UBWCP_P016_Y: - case UBWCP_P016_UV: - return P016; - default: - WARN(1, "Fix this!!!"); - return STD_IMAGE_FORMAT_INVALID; - } -} - unsigned int ubwcp_get_hw_image_format_value(u16 ioctl_image_format) { enum ubwcp_std_image_format format; @@ -1559,13 +1625,11 @@ int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) goto unlock; } - if (!ubwcp_buf_attrs_valid(attr)) { + if (!ubwcp_buf_attrs_valid(ubwcp, attr)) { ERR("Invalid buf attrs"); goto err; } - DBG_BUF_ATTR("valid buf attrs"); - if (attr->image_format == UBWCP_LINEAR) { DBG_BUF_ATTR("Linear format requested"); @@ -2234,12 +2298,15 @@ static int ubwcp_close(struct inode *i, struct file *f) return 0; } - /* handle IOCTLs */ static long ubwcp_ioctl(struct file *file, unsigned int ioctl_num, unsigned long ioctl_param) { struct ubwcp_ioctl_buffer_attrs buf_attr_ioctl; struct ubwcp_ioctl_hw_version hw_ver; + struct 
ubwcp_ioctl_validate_stride validate_stride_ioctl; + struct ubwcp_ioctl_stride_align stride_align_ioctl; + enum ubwcp_std_image_format format; + struct ubwcp_driver *ubwcp; switch (ioctl_num) { case UBWCP_IOCTL_SET_BUF_ATTR: @@ -2260,6 +2327,68 @@ static long ubwcp_ioctl(struct file *file, unsigned int ioctl_num, unsigned long } break; + case UBWCP_IOCTL_GET_STRIDE_ALIGN: + DBG("IOCTL : GET_STRIDE_ALIGN"); + if (copy_from_user(&stride_align_ioctl, (const void __user *) ioctl_param, + sizeof(stride_align_ioctl))) { + ERR("ERROR: copy_from_user() failed"); + return -EFAULT; + } + + format = to_std_format(stride_align_ioctl.image_format); + if (format == STD_IMAGE_FORMAT_INVALID) + return -EINVAL; + + if (stride_align_ioctl.unused != 0) + return -EINVAL; + + if (get_stride_alignment(format, &stride_align_ioctl.stride_align)) { + ERR("ERROR: copy_to_user() failed"); + return -EFAULT; + } + + if (copy_to_user((void __user *)ioctl_param, &stride_align_ioctl, + sizeof(stride_align_ioctl))) { + ERR("ERROR: copy_to_user() failed"); + return -EFAULT; + } + break; + + case UBWCP_IOCTL_VALIDATE_STRIDE: + DBG("IOCTL : VALIDATE_STRIDE"); + ubwcp = ubwcp_get_driver(); + if (!ubwcp) + return -EINVAL; + + if (copy_from_user(&validate_stride_ioctl, (const void __user *) ioctl_param, + sizeof(validate_stride_ioctl))) { + ERR("ERROR: copy_from_user() failed"); + return -EFAULT; + } + + format = to_std_format(validate_stride_ioctl.image_format); + if (format == STD_IMAGE_FORMAT_INVALID) { + ERR("ERROR: invalid format: %d", validate_stride_ioctl.image_format); + return -EINVAL; + } + + if (validate_stride_ioctl.unused1 || validate_stride_ioctl.unused2) { + ERR("ERROR: unused values must be set to 0"); + return -EINVAL; + } + + validate_stride_ioctl.valid = stride_is_valid(ubwcp, + validate_stride_ioctl.image_format, + validate_stride_ioctl.width, + validate_stride_ioctl.stride); + + if (copy_to_user((void __user *)ioctl_param, &validate_stride_ioctl, + sizeof(validate_stride_ioctl))) { 
+ ERR("ERROR: copy_to_user() failed"); + return -EFAULT; + } + break; + default: ERR("Invalid ioctl_num = %d", ioctl_num); return -EINVAL; @@ -2394,7 +2523,7 @@ int ubwcp_register_error_handler(u32 client_id, ubwcp_error_handler_t handler, } EXPORT_SYMBOL(ubwcp_register_error_handler); -static void ubwcp_notify_error_handlers(struct unwcp_err_info *err) +static void ubwcp_notify_error_handlers(struct ubwcp_err_info *err) { struct handler_node *node; unsigned long flags; @@ -2498,7 +2627,7 @@ int ubwcp_iommu_fault_handler(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags, void *data) { int ret = 0; - struct unwcp_err_info err; + struct ubwcp_err_info err; struct ubwcp_driver *ubwcp = ubwcp_get_driver(); struct device *cb_dev = (struct device *)data; @@ -2537,7 +2666,7 @@ irqreturn_t ubwcp_irq_handler(int irq, void *ptr) void __iomem *base; u64 src; phys_addr_t addr; - struct unwcp_err_info err; + struct ubwcp_err_info err; error_print_count++; From 3c0b9be4c9a4a24aa344aad43c29d3a0a7d96125 Mon Sep 17 00:00:00 2001 From: Liam Mark Date: Fri, 7 Apr 2023 09:22:36 -0700 Subject: [PATCH 19/35] ubwcp: Replace add_memory with memremap_pages Replace add_memory with memremap_pages as that will ensure that ULA PA pages are not added to buddy. 
Change-Id: I66dd4c533011f9e7ed4ed45697216ba947a4e3a3 Signed-off-by: Liam Mark --- ubwcp/ubwcp_main.c | 60 +++++++++++++++++++-------------------------- ubwcp/ubwcp_trace.h | 8 +++--- 2 files changed, 29 insertions(+), 39 deletions(-) diff --git a/ubwcp/ubwcp_main.c b/ubwcp/ubwcp_main.c index 8e3881d89d..c14756451a 100644 --- a/ubwcp/ubwcp_main.c +++ b/ubwcp/ubwcp_main.c @@ -27,6 +27,7 @@ #include #include #include +#include MODULE_IMPORT_NS(DMA_BUF); @@ -165,6 +166,8 @@ struct ubwcp_driver { struct mutex hw_range_ck_lock; /* range ck */ struct list_head err_handler_list; /* error handler list */ spinlock_t err_handler_list_lock; /* err_handler_list lock */ + + struct dev_pagemap pgmap; }; struct ubwcp_buf { @@ -432,23 +435,29 @@ EXPORT_SYMBOL(ubwcp_get_hw_version); static int add_ula_pa_memory(struct ubwcp_driver *ubwcp) { - int ret; + int ret = 0; int nid; + void *ptr; nid = memory_add_physaddr_to_nid(ubwcp->ula_pool_base); - DBG("calling add_memory()..."); - trace_ubwcp_add_memory_start(ubwcp->ula_pool_size); - ret = add_memory(nid, ubwcp->ula_pool_base, ubwcp->ula_pool_size, MHP_NONE); - trace_ubwcp_add_memory_end(ubwcp->ula_pool_size); + DBG("calling memremap_pages()..."); + ubwcp->pgmap.type = MEMORY_DEVICE_GENERIC; + ubwcp->pgmap.nr_range = 1; + ubwcp->pgmap.range.start = ubwcp->ula_pool_base; + ubwcp->pgmap.range.end = ubwcp->ula_pool_base + ubwcp->ula_pool_size - 1; + trace_ubwcp_memremap_pages_start(ubwcp->ula_pool_size); + ptr = memremap_pages(&ubwcp->pgmap, nid); + trace_ubwcp_memremap_pages_end(ubwcp->ula_pool_size); - if (ret) { - ERR("add_memory() failed st:0x%lx sz:0x%lx err: %d", + if (IS_ERR(ptr)) { + ret = IS_ERR(ptr); + ERR("memremap_pages() failed st:0x%lx sz:0x%lx err: %d", ubwcp->ula_pool_base, ubwcp->ula_pool_size, ret); /* Fix to put driver in invalid state */ } else { - DBG("add_memory() ula_pool_base:0x%llx, size:0x%zx, kernel addr:0x%p", + DBG("memremap_pages() ula_pool_base:0x%llx, size:0x%zx, kernel addr:0x%p", ubwcp->ula_pool_base, 
ubwcp->ula_pool_size, page_to_virt(pfn_to_page(PFN_DOWN(ubwcp->ula_pool_base)))); @@ -541,20 +550,10 @@ static int dec_num_non_lin_buffers(struct ubwcp_driver *ubwcp) trace_ubwcp_offline_sync_end(ubwcp->ula_pool_size); DBG("Cancel memory offlining"); - DBG("Calling offline_and_remove_memory() for ULA PA pool"); - trace_ubwcp_offline_and_remove_memory_start(ubwcp->ula_pool_size); - ret = offline_and_remove_memory(ubwcp->ula_pool_base, - ubwcp->ula_pool_size); - trace_ubwcp_offline_and_remove_memory_end(ubwcp->ula_pool_size); - if (ret) { - ERR("remove memory failed st:0x%lx sz:0x%lx err: %d", - ubwcp->ula_pool_base, - ubwcp->ula_pool_size, ret); - goto err_remove_mem; - } else { - DBG("DONE: calling remove memory for ULA PA pool"); - } - + DBG("Calling memunmap_pages() for ULA PA pool"); + trace_ubwcp_memunmap_pages_start(ubwcp->ula_pool_size); + memunmap_pages(&ubwcp->pgmap); + trace_ubwcp_memunmap_pages_end(ubwcp->ula_pool_size); ret = add_ula_pa_memory(ubwcp); if (ret) { ERR("Bad state: failed to add back memory"); @@ -581,20 +580,11 @@ static int dec_num_non_lin_buffers(struct ubwcp_driver *ubwcp) sync_offset += sync_size; } trace_ubwcp_offline_sync_end(ubwcp->ula_pool_size); + DBG("Calling memunmap_pages() for ULA PA pool"); + trace_ubwcp_memunmap_pages_start(ubwcp->ula_pool_size); + memunmap_pages(&ubwcp->pgmap); + trace_ubwcp_memunmap_pages_end(ubwcp->ula_pool_size); - DBG("Calling offline_and_remove_memory() for ULA PA pool"); - trace_ubwcp_offline_and_remove_memory_start(ubwcp->ula_pool_size); - ret = offline_and_remove_memory(ubwcp->ula_pool_base, ubwcp->ula_pool_size); - trace_ubwcp_offline_and_remove_memory_end(ubwcp->ula_pool_size); - if (ret) { - ERR("offline_and_remove_memory failed st:0x%lx sz:0x%lx err: %d", - ubwcp->ula_pool_base, - ubwcp->ula_pool_size, ret); - /* Fix to put driver in invalid state */ - goto err_remove_mem; - } else { - DBG("DONE: calling offline_and_remove_memory() for ULA PA pool"); - } DBG("Calling power OFF ..."); 
ubwcp_power(ubwcp, false); ubwcp->mem_online = false; diff --git a/ubwcp/ubwcp_trace.h b/ubwcp/ubwcp_trace.h index 3e9da11a58..1e42a2882d 100644 --- a/ubwcp/ubwcp_trace.h +++ b/ubwcp/ubwcp_trace.h @@ -103,14 +103,14 @@ DEFINE_EVENT(ubwcp_dmabuf_event, ubwcp_init_buffer_end, TP_ARGS(dbuf_addr) ); -DEFINE_EVENT(ubwcp_size_event, ubwcp_add_memory_start, +DEFINE_EVENT(ubwcp_size_event, ubwcp_memremap_pages_start, TP_PROTO(size_t size), TP_ARGS(size) ); -DEFINE_EVENT(ubwcp_size_event, ubwcp_add_memory_end, +DEFINE_EVENT(ubwcp_size_event, ubwcp_memremap_pages_end, TP_PROTO(size_t size), @@ -215,14 +215,14 @@ DEFINE_EVENT(ubwcp_size_event, ubwcp_hw_flush_end, TP_ARGS(size) ); -DEFINE_EVENT(ubwcp_size_event, ubwcp_offline_and_remove_memory_start, +DEFINE_EVENT(ubwcp_size_event, ubwcp_memunmap_pages_start, TP_PROTO(size_t size), TP_ARGS(size) ); -DEFINE_EVENT(ubwcp_size_event, ubwcp_offline_and_remove_memory_end, +DEFINE_EVENT(ubwcp_size_event, ubwcp_memunmap_pages_end, TP_PROTO(size_t size), From d9bea7f7f40fde27fb2b2a1dd38e51e41f63ae0b Mon Sep 17 00:00:00 2001 From: Amol Jadi Date: Fri, 17 Feb 2023 14:48:02 -0800 Subject: [PATCH 20/35] ubwcp: limit error logs Enable read & decode irq only for debug. Rate limit error logs. Change-Id: I782d5af9049bdb501d14f3cbc6a7f868d49da581 Signed-off-by: Amol Jadi --- ubwcp/ubwcp_hw.c | 41 +++--- ubwcp/ubwcp_hw.h | 5 +- ubwcp/ubwcp_main.c | 309 +++++++++++++++++++++++++++++++++------------ 3 files changed, 253 insertions(+), 102 deletions(-) diff --git a/ubwcp/ubwcp_hw.c b/ubwcp/ubwcp_hw.c index ed3e485038..8d32213fb6 100644 --- a/ubwcp/ubwcp_hw.c +++ b/ubwcp/ubwcp_hw.c @@ -13,18 +13,14 @@ #include "ubwcp_hw.h" -extern u32 ubwcp_debug_trace_enable; +static bool ubwcp_hw_trace_en; //#define DBG(fmt, args...) #define DBG(fmt, args...) \ do { \ - if (ubwcp_debug_trace_enable) \ + if (ubwcp_hw_trace_en) \ pr_err("ubwcp: hw: %s(): " fmt "\n", __func__, ##args); \ } while (0) -#define ERR(fmt, args...) 
\ - do { \ - if (ubwcp_debug_trace_enable) \ - pr_err("ubwcp: hw: %s(): ~~~ERROR~~~: " fmt "\n", __func__, ##args); \ - } while (0) +#define ERR(fmt, args...) pr_err("ubwcp: hw: %s(): ~~~ERROR~~~: " fmt "\n", __func__, ##args); MODULE_LICENSE("GPL"); @@ -65,6 +61,8 @@ MODULE_LICENSE("GPL"); #define SPARE 0x1188 +#define UBWCP_DEBUG_REG_RW + /* read/write register */ #if defined(UBWCP_USE_SMC) #define UBWCP_REG_READ(_base, _offset) \ @@ -166,20 +164,20 @@ u64 ubwcp_hw_interrupt_src_address(void __iomem *base, u16 interrupt) switch (interrupt) { case INTERRUPT_READ_ERROR: - addr_low = UBWCP_REG_READ(base, INTERRUPT_READ_SRC_LOW); - addr_high = UBWCP_REG_READ(base, INTERRUPT_READ_SRC_HIGH) & 0xF; + addr_low = UBWCP_REG_READ_NO_DBG(base, INTERRUPT_READ_SRC_LOW); + addr_high = UBWCP_REG_READ_NO_DBG(base, INTERRUPT_READ_SRC_HIGH) & 0xF; break; case INTERRUPT_WRITE_ERROR: - addr_low = UBWCP_REG_READ(base, INTERRUPT_WRITE_SRC_LOW); - addr_high = UBWCP_REG_READ(base, INTERRUPT_WRITE_SRC_HIGH) & 0xF; + addr_low = UBWCP_REG_READ_NO_DBG(base, INTERRUPT_WRITE_SRC_LOW); + addr_high = UBWCP_REG_READ_NO_DBG(base, INTERRUPT_WRITE_SRC_HIGH) & 0xF; break; case INTERRUPT_DECODE_ERROR: - addr_low = UBWCP_REG_READ(base, INTERRUPT_DECODE_SRC_LOW); - addr_high = UBWCP_REG_READ(base, INTERRUPT_DECODE_SRC_HIGH) & 0xF; + addr_low = UBWCP_REG_READ_NO_DBG(base, INTERRUPT_DECODE_SRC_LOW); + addr_high = UBWCP_REG_READ_NO_DBG(base, INTERRUPT_DECODE_SRC_HIGH) & 0xF; break; case INTERRUPT_ENCODE_ERROR: - addr_low = UBWCP_REG_READ(base, INTERRUPT_ENCODE_SRC_LOW); - addr_high = UBWCP_REG_READ(base, INTERRUPT_ENCODE_SRC_HIGH) & 0xF; + addr_low = UBWCP_REG_READ_NO_DBG(base, INTERRUPT_ENCODE_SRC_LOW); + addr_high = UBWCP_REG_READ_NO_DBG(base, INTERRUPT_ENCODE_SRC_HIGH) & 0xF; break; default: /* TBD: fatal error? 
*/ @@ -356,7 +354,6 @@ void ubwcp_hw_power_vote_status(void __iomem *pwr_ctrl, u8 *vote, u8 *status) reg = UBWCP_REG_READ(pwr_ctrl, 0); *vote = (reg & BIT(0)) >> 0; *status = (reg & BIT(31)) >> 31; - DBG("pwr_ctrl reg: 0x%x (vote = %d status = %d)", reg, *vote, *status); } void ubwcp_hw_one_time_init(void __iomem *base) @@ -383,3 +380,15 @@ void ubwcp_hw_one_time_init(void __iomem *base) ubwcp_hw_macro_tile_config(base); } EXPORT_SYMBOL(ubwcp_hw_one_time_init); + +void ubwcp_hw_trace_set(bool value) +{ + ubwcp_hw_trace_en = value; +} +EXPORT_SYMBOL(ubwcp_hw_trace_set); + +void ubwcp_hw_trace_get(bool *value) +{ + *value = ubwcp_hw_trace_en; +} +EXPORT_SYMBOL(ubwcp_hw_trace_get); diff --git a/ubwcp/ubwcp_hw.h b/ubwcp/ubwcp_hw.h index d83191f38f..5fc1f10247 100644 --- a/ubwcp/ubwcp_hw.h +++ b/ubwcp/ubwcp_hw.h @@ -66,8 +66,7 @@ void ubwcp_hw_interrupt_enable(void __iomem *base, u16 interrupt, bool enable); void ubwcp_hw_power_on(void __iomem *pwr_ctrl, bool power_on); void ubwcp_hw_one_time_init(void __iomem *base); int ubwcp_hw_flush(void __iomem *base); - -//#define UBWCP_USE_SMC -#define UBWCP_DEBUG_REG_RW +void ubwcp_hw_trace_set(bool value); +void ubwcp_hw_trace_get(bool *value); #endif /* __UBWCP_HW_H_ */ diff --git a/ubwcp/ubwcp_main.c b/ubwcp/ubwcp_main.c index 8e3881d89d..4541f70efb 100644 --- a/ubwcp/ubwcp_main.c +++ b/ubwcp/ubwcp_main.c @@ -48,8 +48,6 @@ MODULE_IMPORT_NS(DMA_BUF); #define UBWCP_ALIGN(_x, _y) ((((_x) + (_y) - 1)/(_y))*(_y)) -//#define DBG(fmt, args...) -//#define DBG_BUF_ATTR(fmt, args...) #define DBG_BUF_ATTR(fmt, args...) do { if (ubwcp_debug_trace_enable) \ pr_err("ubwcp: %s(): " fmt "\n", __func__, ##args); \ } while (0) @@ -57,6 +55,8 @@ MODULE_IMPORT_NS(DMA_BUF); pr_err("ubwcp: %s(): " fmt "\n", __func__, ##args); \ } while (0) #define ERR(fmt, args...) pr_err("ubwcp: %s(): ~~~ERROR~~~: " fmt "\n", __func__, ##args) +#define ERR_RATE_LIMIT(fmt, args...) 
pr_err_ratelimited("ubwcp: %s(): ~~~ERROR~~~: " fmt "\n",\ + __func__, ##args) #define FENTRY() DBG("") @@ -111,6 +111,10 @@ struct ubwcp_driver { /* debugfs */ struct dentry *debugfs_root; + bool read_err_irq_en; + bool write_err_irq_en; + bool decode_err_irq_en; + bool encode_err_irq_en; /* ubwcp devices */ struct device *dev; //ubwcp device @@ -194,8 +198,7 @@ struct ubwcp_buf { static struct ubwcp_driver *me; -static int error_print_count; -u32 ubwcp_debug_trace_enable; +static u32 ubwcp_debug_trace_enable; static struct ubwcp_driver *ubwcp_get_driver(void) { @@ -2119,7 +2122,6 @@ static int ubwcp_unlock(struct dma_buf *dmabuf, enum dma_data_direction dir) return -1; } - error_print_count = 0; mutex_lock(&buf->lock); ret = unlock_internal(buf, dir, false); mutex_unlock(&buf->lock); @@ -2404,21 +2406,169 @@ static const struct file_operations ubwcp_fops = { .unlocked_ioctl = ubwcp_ioctl, }; +static int read_err_r_op(void *data, u64 *value) +{ + struct ubwcp_driver *ubwcp = data; + *value = ubwcp->read_err_irq_en; + return 0; +} -static int ubwcp_debugfs_init(struct ubwcp_driver *ubwcp) +static int read_err_w_op(void *data, u64 value) +{ + struct ubwcp_driver *ubwcp = data; + + if (ubwcp_power(ubwcp, true)) + return -1; + + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_READ_ERROR, value); + ubwcp->read_err_irq_en = value; + + if (ubwcp_power(ubwcp, false)) + return -1; + + return 0; +} + +static int write_err_r_op(void *data, u64 *value) +{ + struct ubwcp_driver *ubwcp = data; + *value = ubwcp->write_err_irq_en; + return 0; +} + +static int write_err_w_op(void *data, u64 value) +{ + struct ubwcp_driver *ubwcp = data; + + if (ubwcp_power(ubwcp, true)) + return -1; + + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_WRITE_ERROR, value); + ubwcp->write_err_irq_en = value; + + if (ubwcp_power(ubwcp, false)) + return -1; + + return 0; +} + +static int decode_err_r_op(void *data, u64 *value) +{ + struct ubwcp_driver *ubwcp = data; + *value = 
ubwcp->decode_err_irq_en; + return 0; +} + +static int decode_err_w_op(void *data, u64 value) +{ + struct ubwcp_driver *ubwcp = data; + + if (ubwcp_power(ubwcp, true)) + return -1; + + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_DECODE_ERROR, value); + ubwcp->decode_err_irq_en = value; + + if (ubwcp_power(ubwcp, false)) + return -1; + + return 0; +} + +static int encode_err_r_op(void *data, u64 *value) +{ + struct ubwcp_driver *ubwcp = data; + *value = ubwcp->encode_err_irq_en; + return 0; +} + +static int encode_err_w_op(void *data, u64 value) +{ + struct ubwcp_driver *ubwcp = data; + + if (ubwcp_power(ubwcp, true)) + return -1; + + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_ENCODE_ERROR, value); + ubwcp->encode_err_irq_en = value; + + if (ubwcp_power(ubwcp, false)) + return -1; + + return 0; +} + +static int reg_rw_trace_w_op(void *data, u64 value) +{ + ubwcp_hw_trace_set(value); + return 0; +} + +static int reg_rw_trace_r_op(void *data, u64 *value) +{ + bool trace_status; + ubwcp_hw_trace_get(&trace_status); + *value = trace_status; + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(read_err_fops, read_err_r_op, read_err_w_op, "%d\n"); +DEFINE_DEBUGFS_ATTRIBUTE(decode_err_fops, decode_err_r_op, decode_err_w_op, "%d\n"); +DEFINE_DEBUGFS_ATTRIBUTE(write_err_fops, write_err_r_op, write_err_w_op, "%d\n"); +DEFINE_DEBUGFS_ATTRIBUTE(encode_err_fops, encode_err_r_op, encode_err_w_op, "%d\n"); +DEFINE_DEBUGFS_ATTRIBUTE(reg_rw_trace_fops, reg_rw_trace_r_op, reg_rw_trace_w_op, "%d\n"); + +static void ubwcp_debugfs_init(struct ubwcp_driver *ubwcp) { struct dentry *debugfs_root; + struct dentry *dfile; debugfs_root = debugfs_create_dir("ubwcp", NULL); - if (!debugfs_root) { - pr_warn("Failed to create debugfs for ubwcp\n"); - return -1; + if (IS_ERR_OR_NULL(debugfs_root)) { + ERR("Failed to create debugfs for ubwcp\n"); + return; } debugfs_create_u32("debug_trace_enable", 0644, debugfs_root, &ubwcp_debug_trace_enable); + dfile = debugfs_create_file("reg_rw_trace_en", 
0644, debugfs_root, ubwcp, ®_rw_trace_fops); + if (IS_ERR_OR_NULL(dfile)) { + ERR("failed to create reg_rw_trace_en debugfs file"); + goto err; + } + + dfile = debugfs_create_file("read_err_irq_en", 0644, debugfs_root, ubwcp, &read_err_fops); + if (IS_ERR_OR_NULL(dfile)) { + ERR("failed to create read_err_irq debugfs file"); + goto err; + } + + dfile = debugfs_create_file("write_err_irq_en", 0644, debugfs_root, ubwcp, &write_err_fops); + if (IS_ERR_OR_NULL(dfile)) { + ERR("failed to create write_err_irq debugfs file"); + goto err; + } + + dfile = debugfs_create_file("decode_err_irq_en", 0644, debugfs_root, ubwcp, + &decode_err_fops); + if (IS_ERR_OR_NULL(dfile)) { + ERR("failed to create decode_err_irq debugfs file"); + goto err; + } + + dfile = debugfs_create_file("encode_err_irq_en", 0644, debugfs_root, ubwcp, + &encode_err_fops); + if (IS_ERR_OR_NULL(dfile)) { + ERR("failed to create encode_err_irq debugfs file"); + goto err; + } + ubwcp->debugfs_root = debugfs_root; - return 0; + return; + +err: + debugfs_remove_recursive(ubwcp->debugfs_root); + ubwcp->debugfs_root = NULL; } static void ubwcp_debugfs_deinit(struct ubwcp_driver *ubwcp) @@ -2620,9 +2770,6 @@ static struct dma_buf *get_dma_buf_from_iova(unsigned long addr) return ret_buf; } -#define ERR_PRINT_COUNT_MAX 21 -/* TBD: use proper rate limit for debug prints */ - int ubwcp_iommu_fault_handler(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags, void *data) { @@ -2636,89 +2783,75 @@ int ubwcp_iommu_fault_handler(struct iommu_domain *domain, struct device *dev, goto err; } - error_print_count++; - if (error_print_count < ERR_PRINT_COUNT_MAX) { - err.err_code = UBWCP_SMMU_FAULT; + err.err_code = UBWCP_SMMU_FAULT; - if (cb_dev == ubwcp->dev_desc_cb) - err.smmu_err.iommu_dev_id = UBWCP_DESC_CB_ID; - else if (cb_dev == ubwcp->dev_buf_cb) - err.smmu_err.iommu_dev_id = UBWCP_BUF_CB_ID; - else - err.smmu_err.iommu_dev_id = UBWCP_UNKNOWN_CB_ID; - - ERR("smmu fault error: iommu_dev_id:%d 
iova 0x%llx flags:0x%x", - err.smmu_err.iommu_dev_id, iova, flags); - err.smmu_err.dmabuf = get_dma_buf_from_iova(iova); - err.smmu_err.iova = iova; - err.smmu_err.iommu_fault_flags = flags; - ubwcp_notify_error_handlers(&err); - } + if (cb_dev == ubwcp->dev_desc_cb) + err.smmu_err.iommu_dev_id = UBWCP_DESC_CB_ID; + else if (cb_dev == ubwcp->dev_buf_cb) + err.smmu_err.iommu_dev_id = UBWCP_BUF_CB_ID; + else + err.smmu_err.iommu_dev_id = UBWCP_UNKNOWN_CB_ID; + err.smmu_err.dmabuf = get_dma_buf_from_iova(iova); + err.smmu_err.iova = iova; + err.smmu_err.iommu_fault_flags = flags; + ERR_RATE_LIMIT("ubwcp_err: err code: %d (smmu), iommu_dev_id: %d, iova: 0x%llx, flags: 0x%x", + err.err_code, err.smmu_err.iommu_dev_id, err.smmu_err.iova, + err.smmu_err.iommu_fault_flags); + ubwcp_notify_error_handlers(&err); err: return ret; } - irqreturn_t ubwcp_irq_handler(int irq, void *ptr) { struct ubwcp_driver *ubwcp; void __iomem *base; - u64 src; phys_addr_t addr; struct ubwcp_err_info err; - error_print_count++; - ubwcp = (struct ubwcp_driver *) ptr; base = ubwcp->base; if (irq == ubwcp->irq_range_ck_rd) { - if (error_print_count < ERR_PRINT_COUNT_MAX) { - src = ubwcp_hw_interrupt_src_address(base, 0); - addr = src << 6; - ERR("check range read error: src: 0x%llx", addr); - err.err_code = UBWCP_RANGE_TRANSLATION_ERROR; - err.translation_err.dmabuf = get_dma_buf_from_ulapa(addr); - err.translation_err.ula_pa = addr; - err.translation_err.read = true; - ubwcp_notify_error_handlers(&err); - } + addr = ubwcp_hw_interrupt_src_address(base, 0) << 6; + err.err_code = UBWCP_RANGE_TRANSLATION_ERROR; + err.translation_err.dmabuf = get_dma_buf_from_ulapa(addr); + err.translation_err.ula_pa = addr; + err.translation_err.read = true; + ERR_RATE_LIMIT("ubwcp_err: err code: %d (range), dmabuf: 0x%llx, read: %d, addr: 0x%llx", + err.err_code, err.translation_err.dmabuf, err.translation_err.read, addr); + ubwcp_notify_error_handlers(&err); ubwcp_hw_interrupt_clear(ubwcp->base, 0); + } else if 
(irq == ubwcp->irq_range_ck_wr) { - if (error_print_count < ERR_PRINT_COUNT_MAX) { - src = ubwcp_hw_interrupt_src_address(base, 1); - addr = src << 6; - ERR("check range write error: src: 0x%llx", addr); - err.err_code = UBWCP_RANGE_TRANSLATION_ERROR; - err.translation_err.dmabuf = get_dma_buf_from_ulapa(addr); - err.translation_err.ula_pa = addr; - err.translation_err.read = false; - ubwcp_notify_error_handlers(&err); - } + addr = ubwcp_hw_interrupt_src_address(base, 1) << 6; + err.err_code = UBWCP_RANGE_TRANSLATION_ERROR; + err.translation_err.dmabuf = get_dma_buf_from_ulapa(addr); + err.translation_err.ula_pa = addr; + err.translation_err.read = false; + ERR_RATE_LIMIT("ubwcp_err: err code: %d (range), dmabuf: 0x%llx, read: %d, addr: 0x%llx", + err.err_code, err.translation_err.dmabuf, err.translation_err.read, addr); + ubwcp_notify_error_handlers(&err); ubwcp_hw_interrupt_clear(ubwcp->base, 1); } else if (irq == ubwcp->irq_encode) { - if (error_print_count < ERR_PRINT_COUNT_MAX) { - src = ubwcp_hw_interrupt_src_address(base, 3); - addr = src << 6; - ERR("encode error: src: 0x%llx", addr); - err.err_code = UBWCP_ENCODE_ERROR; - err.enc_err.dmabuf = get_dma_buf_from_ulapa(addr); - err.enc_err.ula_pa = addr; - ubwcp_notify_error_handlers(&err); - } - ubwcp_hw_interrupt_clear(ubwcp->base, 3); //TBD: encode is bit-3 instead of bit-2 + addr = ubwcp_hw_interrupt_src_address(base, 3) << 6; + err.err_code = UBWCP_ENCODE_ERROR; + err.enc_err.dmabuf = get_dma_buf_from_ulapa(addr); + err.enc_err.ula_pa = addr; + ERR_RATE_LIMIT("ubwcp_err: err code: %d (encode), dmabuf: 0x%llx, addr: 0x%llx", + err.err_code, err.enc_err.dmabuf, addr); + ubwcp_notify_error_handlers(&err); + ubwcp_hw_interrupt_clear(ubwcp->base, 3); } else if (irq == ubwcp->irq_decode) { - if (error_print_count < ERR_PRINT_COUNT_MAX) { - src = ubwcp_hw_interrupt_src_address(base, 2); - addr = src << 6; - ERR("decode error: src: 0x%llx", addr); - err.err_code = UBWCP_DECODE_ERROR; - err.dec_err.dmabuf = 
get_dma_buf_from_ulapa(addr); - err.dec_err.ula_pa = addr; - ubwcp_notify_error_handlers(&err); - } - ubwcp_hw_interrupt_clear(ubwcp->base, 2); //TBD: decode is bit-2 instead of bit-3 + addr = ubwcp_hw_interrupt_src_address(base, 2) << 6; + err.err_code = UBWCP_DECODE_ERROR; + err.dec_err.dmabuf = get_dma_buf_from_ulapa(addr); + err.dec_err.ula_pa = addr; + ERR_RATE_LIMIT("ubwcp_err: err code: %d (decode), dmabuf: 0x%llx, addr: 0x%llx", + err.err_code, err.enc_err.dmabuf, addr); + ubwcp_notify_error_handlers(&err); + ubwcp_hw_interrupt_clear(ubwcp->base, 2); } else { ERR("unknown irq: %d", irq); return IRQ_NONE; @@ -2849,9 +2982,6 @@ static int qcom_ubwcp_probe(struct platform_device *pdev) mutex_init(&ubwcp->hw_range_ck_lock); spin_lock_init(&ubwcp->err_handler_list_lock); - if (ubwcp_interrupt_register(pdev, ubwcp)) - return -1; - /* Regulator */ ubwcp->vdd = devm_regulator_get(ubwcp_dev, "vdd"); if (IS_ERR_OR_NULL(ubwcp->vdd)) { @@ -2872,9 +3002,17 @@ static int qcom_ubwcp_probe(struct platform_device *pdev) if (ubwcp_cdev_init(ubwcp)) return -1; - if (ubwcp_debugfs_init(ubwcp)) + /* disable all interrupts (reset value has some interrupts enabled by default) */ + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_READ_ERROR, false); + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_WRITE_ERROR, false); + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_ENCODE_ERROR, false); + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_DECODE_ERROR, false); + + if (ubwcp_interrupt_register(pdev, ubwcp)) return -1; + ubwcp_debugfs_init(ubwcp); + /* create ULA pool */ ubwcp->ula_pool = gen_pool_create(12, -1); if (!ubwcp->ula_pool) { @@ -2910,11 +3048,16 @@ static int qcom_ubwcp_probe(struct platform_device *pdev) /* set pdev->dev->driver_data = ubwcp */ platform_set_drvdata(pdev, ubwcp); - /* enable all 4 interrupts */ - ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_READ_ERROR, true); - ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_WRITE_ERROR, true); - 
ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_ENCODE_ERROR, true); - ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_DECODE_ERROR, true); + + /* enable interrupts */ + if (ubwcp->read_err_irq_en) + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_READ_ERROR, true); + if (ubwcp->write_err_irq_en) + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_WRITE_ERROR, true); + if (ubwcp->decode_err_irq_en) + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_DECODE_ERROR, true); + if (ubwcp->encode_err_irq_en) + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_ENCODE_ERROR, true); /* Turn OFF until buffers are allocated */ if (ubwcp_power(ubwcp, false)) { From e49b12d76b0b2271f727a36c26fb9c53824f0130 Mon Sep 17 00:00:00 2001 From: John Moon Date: Wed, 19 Apr 2023 12:39:57 -0700 Subject: [PATCH 21/35] ubwcp: Adding missing header file Currently, the Bazel build fails due to a missing header file that is not copied to the sandbox. Add the missing file. Change-Id: I2301eb9d072e07433b4eea06479986c25a0894c5 Signed-off-by: John Moon --- ubwcp/define_modules.bzl | 1 + 1 file changed, 1 insertion(+) diff --git a/ubwcp/define_modules.bzl b/ubwcp/define_modules.bzl index e603953a25..965f354ba5 100644 --- a/ubwcp/define_modules.bzl +++ b/ubwcp/define_modules.bzl @@ -12,6 +12,7 @@ def define_modules(target, variant): "ubwcp_main.c", "ubwcp_hw.c", "ubwcp_hw.h", + "ubwcp_trace.h", ], hdrs=["include/uapi/ubwcp_ioctl.h", "include/kernel/ubwcp.h"], deps = ["//msm-kernel:all_headers"], From e643ade79c90034bfeebce5ec2d9cc56cbae0141 Mon Sep 17 00:00:00 2001 From: John Moon Date: Wed, 19 Apr 2023 12:40:39 -0700 Subject: [PATCH 22/35] ubwcp: Enable DDK build Enable the DDK build by default in Android.mk. 
Change-Id: Id364a32aec4a29d1e95dee0cb09c4e46784e328b Signed-off-by: John Moon --- ubwcp/Android.mk | 1 + 1 file changed, 1 insertion(+) diff --git a/ubwcp/Android.mk b/ubwcp/Android.mk index d55687fae1..b0761681f6 100644 --- a/ubwcp/Android.mk +++ b/ubwcp/Android.mk @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only LOCAL_PATH := $(call my-dir) +LOCAL_MODULE_DDK_BUILD := true include $(CLEAR_VARS) # For incremental compilation From 1d12729839f28bac1e92d4443d8965cfafba8718 Mon Sep 17 00:00:00 2001 From: Amol Jadi Date: Wed, 12 Apr 2023 12:09:05 -0700 Subject: [PATCH 23/35] ubwcp: update failure handling Put driver in fault state on critical failures. Other minor cleanup. Change-Id: I79154178ae57fe12a3ef87b51626fd420ccb55c0 Signed-off-by: Amol Jadi --- ubwcp/ubwcp_main.c | 549 +++++++++++++++++++++++++-------------------- 1 file changed, 310 insertions(+), 239 deletions(-) diff --git a/ubwcp/ubwcp_main.c b/ubwcp/ubwcp_main.c index d358b5532e..29e6668180 100644 --- a/ubwcp/ubwcp_main.c +++ b/ubwcp/ubwcp_main.c @@ -55,13 +55,12 @@ MODULE_IMPORT_NS(DMA_BUF); #define DBG(fmt, args...) do { if (ubwcp_debug_trace_enable) \ pr_err("ubwcp: %s(): " fmt "\n", __func__, ##args); \ } while (0) -#define ERR(fmt, args...) pr_err("ubwcp: %s(): ~~~ERROR~~~: " fmt "\n", __func__, ##args) +#define ERR(fmt, args...) pr_err("ubwcp: %d: %s(): ~~~ERROR~~~: " fmt "\n", __LINE__, __func__, ##args) #define ERR_RATE_LIMIT(fmt, args...) 
pr_err_ratelimited("ubwcp: %s(): ~~~ERROR~~~: " fmt "\n",\ __func__, ##args) #define FENTRY() DBG("") - #define META_DATA_PITCH_ALIGN 64 #define META_DATA_HEIGHT_ALIGN 16 #define META_DATA_SIZE_ALIGN 4096 @@ -69,6 +68,11 @@ MODULE_IMPORT_NS(DMA_BUF); #define UBWCP_SYNC_GRANULE 0x4000000L /* 64 MB */ +enum ula_remove_mem_status { + ULA_REMOVE_MEM_SUCCESS = 0, + ULA_REMOVE_MEM_ABORTED = 1 +}; + struct ubwcp_desc { int idx; void *ptr; @@ -103,6 +107,12 @@ enum ubwcp_std_image_format { STD_IMAGE_FORMAT_INVALID = 0xFF }; +enum ubwcp_state { + UBWCP_STATE_READY = 0, + UBWCP_STATE_INVALID = -1, + UBWCP_STATE_FAULT = -2, +}; + struct ubwcp_driver { /* cdev related */ dev_t devt; @@ -159,6 +169,8 @@ struct ubwcp_driver { struct ubwcp_image_format_info format_info[INFO_FORMAT_LIST_SIZE]; + /* driver state */ + enum ubwcp_state state; atomic_t num_non_lin_buffers; bool mem_online; @@ -309,69 +321,33 @@ static int ubwcp_power(struct ubwcp_driver *ubwcp, bool enable) { int ret = 0; - if (!ubwcp) { - ERR("ubwcp ptr is NULL"); - return -1; - } + if (enable) + ret = regulator_enable(ubwcp->vdd); + else + ret = regulator_disable(ubwcp->vdd); - if (!ubwcp->vdd) { - ERR("vdd is NULL"); - return -1; + if (ret) { + ERR("regulator call (enable: %d) failed: %d", enable, ret); + return ret; } if (enable) { - ret = regulator_enable(ubwcp->vdd); - if (ret < 0) { - ERR("regulator_enable failed: %d", ret); - ret = -1; - } else { - DBG("regulator_enable() success"); - } - - if (!ret) { - ret = ubwcp_enable_clocks(ubwcp); - if (ret) { - ERR("enable clocks failed: %d", ret); - regulator_disable(ubwcp->vdd); - } else { - DBG("enable clocks success"); - } + ret = ubwcp_enable_clocks(ubwcp); + if (ret) { + ERR("enable clocks failed: %d", ret); + regulator_disable(ubwcp->vdd); + return ret; } } else { - ret = regulator_disable(ubwcp->vdd); - if (ret < 0) { - ERR("regulator_disable failed: %d", ret); - ret = -1; - } else { - DBG("regulator_disable() success"); - } - - if (!ret) { - 
ubwcp_disable_clocks(ubwcp); - DBG("disable clocks success"); - } + ubwcp_disable_clocks(ubwcp); } return ret; } -static int ubwcp_flush(struct ubwcp_driver *ubwcp) -{ - int ret = 0; - - mutex_lock(&ubwcp->ubwcp_flush_lock); - ret = ubwcp_hw_flush(ubwcp->base); - mutex_unlock(&ubwcp->ubwcp_flush_lock); - if (ret != 0) - WARN(1, "ubwcp_hw_flush() failed!"); - - return ret; -} - - /* get dma_buf ptr for the given dma_buf fd */ -struct dma_buf *ubwcp_dma_buf_fd_to_dma_buf(int dma_buf_fd) +static struct dma_buf *ubwcp_dma_buf_fd_to_dma_buf(int dma_buf_fd) { struct dma_buf *dmabuf; @@ -389,7 +365,6 @@ struct dma_buf *ubwcp_dma_buf_fd_to_dma_buf(int dma_buf_fd) return dmabuf; } -EXPORT_SYMBOL(ubwcp_dma_buf_fd_to_dma_buf); /* get ubwcp_buf corresponding to the given dma_buf */ @@ -430,13 +405,16 @@ int ubwcp_get_hw_version(struct ubwcp_ioctl_hw_version *ver) if (!ubwcp) return -1; + if (ubwcp->state != UBWCP_STATE_FAULT) + return -EPERM; + ver->major = ubwcp->hw_ver_major; ver->minor = ubwcp->hw_ver_minor; return 0; } EXPORT_SYMBOL(ubwcp_get_hw_version); -static int add_ula_pa_memory(struct ubwcp_driver *ubwcp) +static int ula_add_mem(struct ubwcp_driver *ubwcp) { int ret = 0; int nid; @@ -458,7 +436,6 @@ static int add_ula_pa_memory(struct ubwcp_driver *ubwcp) ubwcp->ula_pool_base, ubwcp->ula_pool_size, ret); - /* Fix to put driver in invalid state */ } else { DBG("memremap_pages() ula_pool_base:0x%llx, size:0x%zx, kernel addr:0x%p", ubwcp->ula_pool_base, @@ -469,26 +446,96 @@ static int add_ula_pa_memory(struct ubwcp_driver *ubwcp) return ret; } -static int inc_num_non_lin_buffers(struct ubwcp_driver *ubwcp) +static int ula_map_uncached(u64 base, u64 size) +{ + int ret; + trace_ubwcp_set_direct_map_range_uncached_start(size); + ret = set_direct_map_range_uncached((unsigned long)phys_to_virt(base), size >> PAGE_SHIFT); + trace_ubwcp_set_direct_map_range_uncached_end(size); + if (ret) + ERR("set_direct_map_range_uncached failed st:0x%lx num pages:%lu err: %d", + base, 
size >> PAGE_SHIFT, ret); + return ret; +} + +static void ula_unmap(struct ubwcp_driver *ubwcp) +{ + DBG("Calling memunmap_pages() for ULA PA pool"); + trace_ubwcp_memunmap_pages_start(ubwcp->ula_pool_size); + memunmap_pages(&ubwcp->pgmap); + trace_ubwcp_memunmap_pages_end(ubwcp->ula_pool_size); +} + +static void ula_sync_for_cpu(struct device *dev, u64 addr, unsigned long size) +{ + DBG("Partial sync offset:0x%lx size:0x%lx", addr, size); + trace_ubwcp_dma_sync_single_for_cpu_start(size); + dma_sync_single_for_cpu(dev, addr, size, DMA_BIDIRECTIONAL); + trace_ubwcp_dma_sync_single_for_cpu_end(size); +} + +/** Remove ula memory in chunks + * Abort if new buffer addition is detected + * If remove succeeds or aborted, return success + * status value indicates if mem was removed or aborted (not removed) + * Otherwise return failure + */ +static int ula_remove_mem(struct ubwcp_driver *ubwcp, enum ula_remove_mem_status *status) { int ret = 0; + unsigned long sync_remain = ubwcp->ula_pool_size; + unsigned long sync_offset = 0; + unsigned long sync_size = 0; + + ret = ula_map_uncached(ubwcp->ula_pool_base, ubwcp->ula_pool_size); + if (ret) + return ret; + + trace_ubwcp_offline_sync_start(ubwcp->ula_pool_size); + while (sync_remain > 0) { + if (atomic_read(&ubwcp->num_non_lin_buffers) > 0) { + trace_ubwcp_offline_sync_end(ubwcp->ula_pool_size); + ula_unmap(ubwcp); + if (ula_add_mem(ubwcp)) { + ERR("remove mem: failed to add back during abort"); + return -1; + } + *status = ULA_REMOVE_MEM_ABORTED; + return 0; + } + + if (UBWCP_SYNC_GRANULE > sync_remain) { + sync_size = sync_remain; + sync_remain = 0; + } else { + sync_size = UBWCP_SYNC_GRANULE; + sync_remain -= UBWCP_SYNC_GRANULE; + } + + ula_sync_for_cpu(ubwcp->dev, ubwcp->ula_pool_base + sync_offset, sync_size); + sync_offset += sync_size; + } + trace_ubwcp_offline_sync_end(ubwcp->ula_pool_size); + ula_unmap(ubwcp); + *status = ULA_REMOVE_MEM_SUCCESS; + return 0; +} + +static int inc_num_non_lin_buffers(struct 
ubwcp_driver *ubwcp) +{ atomic_inc(&ubwcp->num_non_lin_buffers); mutex_lock(&ubwcp->mem_hotplug_lock); if (!ubwcp->mem_online) { if (atomic_read(&ubwcp->num_non_lin_buffers) == 0) { - ret = -EINVAL; ERR("Bad state: num_non_lin_buffers should not be 0"); - /* Fix to put driver in invalid state */ - goto err_power_on; + goto err; } - ret = ubwcp_power(ubwcp, true); - if (ret) - goto err_power_on; + if (ubwcp_power(ubwcp, true)) + goto err; - ret = add_ula_pa_memory(ubwcp); - if (ret) + if (ula_add_mem(ubwcp)) goto err_add_memory; ubwcp->mem_online = true; @@ -498,109 +545,49 @@ static int inc_num_non_lin_buffers(struct ubwcp_driver *ubwcp) err_add_memory: ubwcp_power(ubwcp, false); -err_power_on: +err: atomic_dec(&ubwcp->num_non_lin_buffers); mutex_unlock(&ubwcp->mem_hotplug_lock); - - return ret; + ubwcp->state = UBWCP_STATE_FAULT; + return -1; } static int dec_num_non_lin_buffers(struct ubwcp_driver *ubwcp) { - int ret = 0; - + int ret; + enum ula_remove_mem_status remove_status; atomic_dec(&ubwcp->num_non_lin_buffers); mutex_lock(&ubwcp->mem_hotplug_lock); - - /* If this is the last buffer being freed, power off ubwcp */ if (atomic_read(&ubwcp->num_non_lin_buffers) == 0) { - unsigned long sync_remain = 0; - unsigned long sync_offset = 0; - unsigned long sync_size = 0; - unsigned long sync_granule = UBWCP_SYNC_GRANULE; - DBG("last buffer: ~~~~~~~~~~~"); if (!ubwcp->mem_online) { - ret = -EINVAL; ERR("Bad state: mem_online should not be false"); - /* Fix to put driver in invalid state */ - goto err_remove_mem; + goto err; } - DBG("set_direct_map_range_uncached() for ULA PA pool st:0x%lx num pages:%lu", - ubwcp->ula_pool_base, ubwcp->ula_pool_size >> PAGE_SHIFT); - trace_ubwcp_set_direct_map_range_uncached_start(ubwcp->ula_pool_size); - ret = set_direct_map_range_uncached((unsigned long)phys_to_virt( - ubwcp->ula_pool_base), ubwcp->ula_pool_size >> PAGE_SHIFT); - trace_ubwcp_set_direct_map_range_uncached_end(ubwcp->ula_pool_size); - if (ret) { - 
ERR("set_direct_map_range_uncached failed st:0x%lx num pages:%lu err: %d", - ubwcp->ula_pool_base, - ubwcp->ula_pool_size >> PAGE_SHIFT, ret); - goto err_remove_mem; + ret = ula_remove_mem(ubwcp, &remove_status); + if (ret) + goto err; + + if (remove_status == ULA_REMOVE_MEM_SUCCESS) { + ubwcp->mem_online = false; + if (ubwcp_power(ubwcp, false)) + goto err; + } else if (remove_status == ULA_REMOVE_MEM_ABORTED) { + DBG("ula memory offline aborted"); } else { - DBG("DONE: calling set_direct_map_range_uncached() for ULA PA pool"); + ERR("unexpected ula remove status: %d", remove_status); + goto err; } - - DBG("Calling dma_sync_single_for_cpu() for ULA PA pool"); - trace_ubwcp_offline_sync_start(ubwcp->ula_pool_size); - - sync_remain = ubwcp->ula_pool_size; - sync_offset = 0; - while (sync_remain > 0) { - if (atomic_read(&ubwcp->num_non_lin_buffers) > 0) { - - trace_ubwcp_offline_sync_end(ubwcp->ula_pool_size); - DBG("Cancel memory offlining"); - - DBG("Calling memunmap_pages() for ULA PA pool"); - trace_ubwcp_memunmap_pages_start(ubwcp->ula_pool_size); - memunmap_pages(&ubwcp->pgmap); - trace_ubwcp_memunmap_pages_end(ubwcp->ula_pool_size); - ret = add_ula_pa_memory(ubwcp); - if (ret) { - ERR("Bad state: failed to add back memory"); - /* Fix to put driver in invalid state */ - ubwcp->mem_online = false; - } - mutex_unlock(&ubwcp->mem_hotplug_lock); - return ret; - } - - if (sync_granule > sync_remain) { - sync_size = sync_remain; - sync_remain = 0; - } else { - sync_size = sync_granule; - sync_remain -= sync_granule; - } - - DBG("Partial sync offset:0x%lx size:0x%lx", sync_offset, sync_size); - trace_ubwcp_dma_sync_single_for_cpu_start(sync_size); - dma_sync_single_for_cpu(ubwcp->dev, ubwcp->ula_pool_base + sync_offset, - sync_size, DMA_BIDIRECTIONAL); - trace_ubwcp_dma_sync_single_for_cpu_end(sync_size); - sync_offset += sync_size; - } - trace_ubwcp_offline_sync_end(ubwcp->ula_pool_size); - DBG("Calling memunmap_pages() for ULA PA pool"); - 
trace_ubwcp_memunmap_pages_start(ubwcp->ula_pool_size); - memunmap_pages(&ubwcp->pgmap); - trace_ubwcp_memunmap_pages_end(ubwcp->ula_pool_size); - - DBG("Calling power OFF ..."); - ubwcp_power(ubwcp, false); - ubwcp->mem_online = false; } mutex_unlock(&ubwcp->mem_hotplug_lock); return 0; -err_remove_mem: +err: atomic_inc(&ubwcp->num_non_lin_buffers); mutex_unlock(&ubwcp->mem_hotplug_lock); - - DBG("returning error: %d", ret); - return ret; + ubwcp->state = UBWCP_STATE_FAULT; + return -1; } /** @@ -627,6 +614,12 @@ static int ubwcp_init_buffer(struct dma_buf *dmabuf) return -1; } + if (ubwcp->state != UBWCP_STATE_READY) { + ERR("driver in invalid state: %d", ubwcp->state); + trace_ubwcp_init_buffer_end(dmabuf); + return -EPERM; + } + if (!dmabuf) { ERR("NULL dmabuf input ptr"); trace_ubwcp_init_buffer_end(dmabuf); @@ -897,38 +890,6 @@ err: return false; } - -/* return true if image format has only Y plane*/ -bool ubwcp_image_y_only(u16 format) -{ - switch (format) { - case UBWCP_NV12_Y: - case UBWCP_NV124R_Y: - case UBWCP_TP10_Y: - case UBWCP_P010_Y: - case UBWCP_P016_Y: - return true; - default: - return false; - } -} - - -/* return true if image format has only UV plane*/ -bool ubwcp_image_uv_only(u16 format) -{ - switch (format) { - case UBWCP_NV12_UV: - case UBWCP_NV124R_UV: - case UBWCP_TP10_UV: - case UBWCP_P010_UV: - case UBWCP_P016_UV: - return true; - default: - return false; - } -} - /* calculate and return metadata buffer size for a given plane * and buffer attributes * NOTE: in this function, we will only pass in NV12 format. 
@@ -1095,7 +1056,7 @@ static size_t ubwcp_ula_size(struct ubwcp_driver *ubwcp, u16 format, return size; } -int missing_plane_from_format(u16 ioctl_image_format) +static int missing_plane_from_format(u16 ioctl_image_format) { int missing_plane; @@ -1136,7 +1097,7 @@ int missing_plane_from_format(u16 ioctl_image_format) return missing_plane; } -int planes_in_format(enum ubwcp_std_image_format format) +static int planes_in_format(enum ubwcp_std_image_format format) { if (format == RGBA) return 1; @@ -1144,7 +1105,7 @@ int planes_in_format(enum ubwcp_std_image_format format) return 2; } -unsigned int ubwcp_get_hw_image_format_value(u16 ioctl_image_format) +static unsigned int ubwcp_get_hw_image_format_value(u16 ioctl_image_format) { enum ubwcp_std_image_format format; @@ -1595,6 +1556,10 @@ int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) return -EINVAL; } + ubwcp = buf->ubwcp; + if (ubwcp->state != UBWCP_STATE_READY) + return -EPERM; + mutex_lock(&buf->lock); if (buf->locked) { @@ -1603,7 +1568,6 @@ int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) goto unlock; } - ubwcp = buf->ubwcp; mmdata = &buf->mmdata; is_non_lin_buf = (buf->buf_attr.image_format != UBWCP_LINEAR); @@ -1699,19 +1663,6 @@ int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) DBG_BUF_ATTR("iova_min_size : %8d (0x%8zx)", iova_min_size, iova_min_size); DBG_BUF_ATTR(""); - if (buf->buf_attr_set) { - /* if buf attr were previously set, these must not be 0 */ - /* TBD: do we need this check in production code? 
*/ - if (!buf->ula_pa) { - WARN(1, "ula_pa cannot be 0 if buf_attr_set is true!!!"); - goto err; - } - if (!buf->ula_size) { - WARN(1, "ula_size cannot be 0 if buf_attr_set is true!!!"); - goto err; - } - } - /* assign ULA PA with uncompressed-size range */ ula_pa = ubwcp_ula_realloc(ubwcp, buf->ula_pa, buf->ula_size, ula_size); if (!ula_pa) { @@ -1873,6 +1824,39 @@ static struct ubwcp_desc *ubwcp_buf_desc_allocate(struct ubwcp_driver *ubwcp) return NULL; } +static int ubwcp_flush(struct ubwcp_driver *ubwcp) +{ + int ret = 0; + mutex_lock(&ubwcp->ubwcp_flush_lock); + trace_ubwcp_hw_flush_start(0); + ret = ubwcp_hw_flush(ubwcp->base); + trace_ubwcp_hw_flush_end(0); + if (ret) + ERR("ubwcp_hw_flush() failed, ret = %d", ret); + mutex_unlock(&ubwcp->ubwcp_flush_lock); + return ret; +} + +static int range_check_disable(struct ubwcp_driver *ubwcp, int idx) +{ + int ret; + mutex_lock(&ubwcp->ubwcp_flush_lock); + mutex_lock(&ubwcp->hw_range_ck_lock); + trace_ubwcp_hw_flush_start(0); + ret = ubwcp_hw_disable_range_check_with_flush(ubwcp->base, idx); + trace_ubwcp_hw_flush_end(0); + mutex_unlock(&ubwcp->hw_range_ck_lock); + mutex_unlock(&ubwcp->ubwcp_flush_lock); + return ret; +} + +static void range_check_enable(struct ubwcp_driver *ubwcp, int idx) +{ + mutex_lock(&ubwcp->hw_range_ck_lock); + ubwcp_hw_enable_range_check(ubwcp->base, idx); + mutex_unlock(&ubwcp->hw_range_ck_lock); +} + /** * Lock buffer for CPU access. This prepares ubwcp hw to allow * CPU access to the compressed buffer. 
It will perform @@ -1902,7 +1886,6 @@ static int ubwcp_lock(struct dma_buf *dmabuf, enum dma_data_direction dir) return -EINVAL; } - if (!valid_dma_direction(dir)) { ERR("invalid direction: %d", dir); trace_ubwcp_lock_end(dmabuf); @@ -1916,6 +1899,13 @@ static int ubwcp_lock(struct dma_buf *dmabuf, enum dma_data_direction dir) return -1; } + ubwcp = buf->ubwcp; + if (ubwcp->state != UBWCP_STATE_READY) { + ERR("driver in invalid state: %d", ubwcp->state); + trace_ubwcp_lock_end(dmabuf); + return -EPERM; + } + mutex_lock(&buf->lock); if (!buf->buf_attr_set) { @@ -1930,7 +1920,6 @@ static int ubwcp_lock(struct dma_buf *dmabuf, enum dma_data_direction dir) if (!buf->locked) { DBG("first lock on buffer"); - ubwcp = buf->ubwcp; /* buf->desc could already be allocated because of perm range xlation */ if (!buf->desc) { @@ -1972,9 +1961,7 @@ static int ubwcp_lock(struct dma_buf *dmabuf, enum dma_data_direction dir) /* enable range check */ DBG("enabling range check, descriptor_id: %d", buf->desc->idx); - mutex_lock(&ubwcp->hw_range_ck_lock); - ubwcp_hw_enable_range_check(ubwcp->base, buf->desc->idx); - mutex_unlock(&ubwcp->hw_range_ck_lock); + range_check_enable(ubwcp, buf->desc->idx); /* Flush/invalidate UBWCP caches */ /* Why: cpu could have done a speculative fetch before @@ -1983,9 +1970,12 @@ static int ubwcp_lock(struct dma_buf *dmabuf, enum dma_data_direction dir) * we force completion of that and then we also cpu invalidate which * will get rid of that line. 
*/ - trace_ubwcp_hw_flush_start(buf->ula_size); - ubwcp_flush(ubwcp); - trace_ubwcp_hw_flush_end(buf->ula_size); + ret = ubwcp_flush(ubwcp); + if (ret) { + ubwcp->state = UBWCP_STATE_FAULT; + ERR("ubwcp_flush() failed: %d, driver state set to FAULT", ret); + goto err_flush_failed; + } /* Flush/invalidate ULA PA from CPU caches * TBD: if (dir == READ or BIDIRECTION) //NOT for write @@ -2008,6 +1998,10 @@ static int ubwcp_lock(struct dma_buf *dmabuf, enum dma_data_direction dir) trace_ubwcp_lock_end(dmabuf); return ret; +err_flush_failed: + range_check_disable(ubwcp, buf->desc->idx); + ubwcp_buf_desc_free(ubwcp, buf->desc); + buf->desc = NULL; err: mutex_unlock(&buf->lock); if (!ret) @@ -2045,18 +2039,13 @@ static int unlock_internal(struct ubwcp_buf *buf, enum dma_data_direction dir, b dma_sync_single_for_device(ubwcp->dev, buf->ula_pa, buf->ula_size, dir); trace_ubwcp_dma_sync_single_for_device_end(buf->ula_size); - /* disable range check with ubwcp flush */ + /* disable range check */ DBG("disabling range check"); - //TBD: could combine these 2 locks into a single lock to make it simpler - mutex_lock(&ubwcp->ubwcp_flush_lock); - mutex_lock(&ubwcp->hw_range_ck_lock); - trace_ubwcp_hw_flush_start(buf->ula_size); - ret = ubwcp_hw_disable_range_check_with_flush(ubwcp->base, buf->desc->idx); - trace_ubwcp_hw_flush_end(buf->ula_size); - if (ret) - ERR("disable_range_check_with_flush() failed: %d", ret); - mutex_unlock(&ubwcp->hw_range_ck_lock); - mutex_unlock(&ubwcp->ubwcp_flush_lock); + ret = range_check_disable(ubwcp, buf->desc->idx); + if (ret) { + ubwcp->state = UBWCP_STATE_FAULT; + ERR("disable_range_check_with_flush() failed: %d, driver state set to FAULT", ret); + } /* release descriptor if perm range xlation is not set */ if (!buf->perm) { @@ -2106,6 +2095,12 @@ static int ubwcp_unlock(struct dma_buf *dmabuf, enum dma_data_direction dir) return -1; } + if (buf->ubwcp->state != UBWCP_STATE_READY) { + ERR("driver in invalid state: %d", buf->ubwcp->state); + 
trace_ubwcp_unlock_end(dmabuf); + return -EPERM; + } + if (!buf->locked) { ERR("unlock() called on buffer which not in locked state"); trace_ubwcp_unlock_end(dmabuf); @@ -2144,6 +2139,11 @@ int ubwcp_get_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) return -1; } + if (buf->ubwcp->state != UBWCP_STATE_READY) { + ERR("driver in invalid state: %d", buf->ubwcp->state); + return -EPERM; + } + mutex_lock(&buf->lock); if (!buf->buf_attr_set) { ERR("buffer attributes not set"); @@ -2185,6 +2185,11 @@ int ubwcp_set_perm_range_translation(struct dma_buf *dmabuf, bool enable) return -1; } + if (buf->ubwcp->state != UBWCP_STATE_READY) { + ERR("driver in invalid state: %d", buf->ubwcp->state); + return -EPERM; + } + /* not implemented */ if (1) { ERR("API not implemented yet"); @@ -2242,8 +2247,14 @@ static int ubwcp_free_buffer(struct dma_buf *dmabuf) return -1; } - mutex_lock(&buf->lock); ubwcp = buf->ubwcp; + if (ubwcp->state != UBWCP_STATE_READY) { + ERR("driver in invalid state: %d", ubwcp->state); + trace_ubwcp_free_buffer_end(dmabuf); + return -EPERM; + } + + mutex_lock(&buf->lock); is_non_lin_buf = (buf->buf_attr.image_format != UBWCP_LINEAR); if (buf->locked) { @@ -2273,7 +2284,7 @@ static int ubwcp_free_buffer(struct dma_buf *dmabuf) dec_num_non_lin_buffers(ubwcp); trace_ubwcp_free_buffer_end(dmabuf); - return 0; + return ret; } @@ -2300,6 +2311,15 @@ static long ubwcp_ioctl(struct file *file, unsigned int ioctl_num, unsigned long enum ubwcp_std_image_format format; struct ubwcp_driver *ubwcp; + ubwcp = ubwcp_get_driver(); + if (!ubwcp) + return -EINVAL; + + if (ubwcp->state != UBWCP_STATE_READY) { + ERR("driver in invalid state: %d", ubwcp->state); + return -EPERM; + } + switch (ioctl_num) { case UBWCP_IOCTL_SET_BUF_ATTR: if (copy_from_user(&buf_attr_ioctl, (const void __user *) ioctl_param, @@ -2348,10 +2368,6 @@ static long ubwcp_ioctl(struct file *file, unsigned int ioctl_num, unsigned long case UBWCP_IOCTL_VALIDATE_STRIDE: DBG("IOCTL : 
VALIDATE_STRIDE"); - ubwcp = ubwcp_get_driver(); - if (!ubwcp) - return -EINVAL; - if (copy_from_user(&validate_stride_ioctl, (const void __user *) ioctl_param, sizeof(validate_stride_ioctl))) { ERR("ERROR: copy_from_user() failed"); @@ -2407,21 +2423,31 @@ static int read_err_w_op(void *data, u64 value) { struct ubwcp_driver *ubwcp = data; + if (ubwcp->state != UBWCP_STATE_READY) + return -EPERM; + if (ubwcp_power(ubwcp, true)) - return -1; + goto err; ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_READ_ERROR, value); ubwcp->read_err_irq_en = value; if (ubwcp_power(ubwcp, false)) - return -1; + goto err; return 0; +err: + ubwcp->state = UBWCP_STATE_FAULT; + return -1; } static int write_err_r_op(void *data, u64 *value) { struct ubwcp_driver *ubwcp = data; + + if (ubwcp->state != UBWCP_STATE_READY) + return -EPERM; + *value = ubwcp->write_err_irq_en; return 0; } @@ -2430,21 +2456,31 @@ static int write_err_w_op(void *data, u64 value) { struct ubwcp_driver *ubwcp = data; + if (ubwcp->state != UBWCP_STATE_READY) + return -EPERM; + if (ubwcp_power(ubwcp, true)) - return -1; + goto err; ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_WRITE_ERROR, value); ubwcp->write_err_irq_en = value; if (ubwcp_power(ubwcp, false)) - return -1; + goto err; return 0; +err: + ubwcp->state = UBWCP_STATE_FAULT; + return -1; } static int decode_err_r_op(void *data, u64 *value) { struct ubwcp_driver *ubwcp = data; + + if (ubwcp->state != UBWCP_STATE_READY) + return -EPERM; + *value = ubwcp->decode_err_irq_en; return 0; } @@ -2453,21 +2489,31 @@ static int decode_err_w_op(void *data, u64 value) { struct ubwcp_driver *ubwcp = data; + if (ubwcp->state != UBWCP_STATE_READY) + return -EPERM; + if (ubwcp_power(ubwcp, true)) - return -1; + goto err; ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_DECODE_ERROR, value); ubwcp->decode_err_irq_en = value; if (ubwcp_power(ubwcp, false)) - return -1; + goto err; return 0; +err: + ubwcp->state = UBWCP_STATE_FAULT; + return -1; } static int 
encode_err_r_op(void *data, u64 *value) { struct ubwcp_driver *ubwcp = data; + + if (ubwcp->state != UBWCP_STATE_READY) + return -EPERM; + *value = ubwcp->encode_err_irq_en; return 0; } @@ -2476,27 +2522,43 @@ static int encode_err_w_op(void *data, u64 value) { struct ubwcp_driver *ubwcp = data; + if (ubwcp->state != UBWCP_STATE_READY) + return -EPERM; + if (ubwcp_power(ubwcp, true)) - return -1; + goto err; ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_ENCODE_ERROR, value); ubwcp->encode_err_irq_en = value; if (ubwcp_power(ubwcp, false)) - return -1; + goto err; return 0; +err: + ubwcp->state = UBWCP_STATE_FAULT; + return -1; } static int reg_rw_trace_w_op(void *data, u64 value) { + struct ubwcp_driver *ubwcp = data; + + if (ubwcp->state != UBWCP_STATE_READY) + return -EPERM; + ubwcp_hw_trace_set(value); return 0; } static int reg_rw_trace_r_op(void *data, u64 *value) { + struct ubwcp_driver *ubwcp = data; bool trace_status; + + if (ubwcp->state != UBWCP_STATE_READY) + return -EPERM; + ubwcp_hw_trace_get(&trace_status); *value = trace_status; return 0; @@ -2647,6 +2709,9 @@ int ubwcp_register_error_handler(u32 client_id, ubwcp_error_handler_t handler, if (!handler) return -EINVAL; + if (ubwcp->state != UBWCP_STATE_READY) + return -EPERM; + node = kzalloc(sizeof(*node), GFP_KERNEL); if (!node) return -ENOMEM; @@ -2689,6 +2754,9 @@ int ubwcp_unregister_error_handler(u32 client_id) if (!ubwcp) return -EINVAL; + if (ubwcp->state != UBWCP_STATE_INVALID) + return -EPERM; + spin_lock_irqsave(&ubwcp->err_handler_list_lock, flags); list_for_each_entry(node, &ubwcp->err_handler_list, list) if (node->client_id == client_id) { @@ -2793,7 +2861,7 @@ err: return ret; } -irqreturn_t ubwcp_irq_handler(int irq, void *ptr) +static irqreturn_t ubwcp_irq_handler(int irq, void *ptr) { struct ubwcp_driver *ubwcp; void __iomem *base; @@ -2961,6 +3029,8 @@ static int qcom_ubwcp_probe(struct platform_device *pdev) INIT_LIST_HEAD(&ubwcp->err_handler_list); + /* driver initial state */ + 
ubwcp->state = UBWCP_STATE_INVALID; atomic_set(&ubwcp->num_non_lin_buffers, 0); ubwcp->mem_online = false; @@ -3057,13 +3127,8 @@ static int qcom_ubwcp_probe(struct platform_device *pdev) ret = msm_ubwcp_set_ops(ubwcp_init_buffer, ubwcp_free_buffer, ubwcp_lock, ubwcp_unlock); if (ret) { - ERR("msm_ubwcp_set_ops() failed: %d, but IGNORED", ret); - /* TBD: ignore return error during testing phase. - * This allows us to rmmod/insmod for faster dev cycle. - * In final version: return error and de-register driver if set_ops fails. - */ - ret = 0; - //goto err_power_off; + ERR("msm_ubwcp_set_ops() failed: %d", ret); + goto err_power_off; } else { DBG("msm_ubwcp_set_ops(): success"); } @@ -3097,13 +3162,14 @@ static int ubwcp_probe_cb_buf(struct platform_device *pdev) return -EINVAL; } - /* save the buffer cb device */ ubwcp->dev_buf_cb = &pdev->dev; - domain = iommu_get_domain_for_dev(ubwcp->dev_buf_cb); if (domain) iommu_set_fault_handler(domain, ubwcp_iommu_fault_handler, ubwcp->dev_buf_cb); + if (ubwcp->dev_desc_cb) + ubwcp->state = UBWCP_STATE_READY; + return 0; } @@ -3124,7 +3190,6 @@ static int ubwcp_probe_cb_desc(struct platform_device *pdev) ubwcp->buffer_desc_size = UBWCP_BUFFER_DESC_OFFSET * UBWCP_BUFFER_DESC_COUNT; - ubwcp->dev_desc_cb = &pdev->dev; dma_set_max_seg_size(ubwcp->dev_desc_cb, DMA_BIT_MASK(32)); @@ -3163,6 +3228,9 @@ static int ubwcp_probe_cb_desc(struct platform_device *pdev) if (domain) iommu_set_fault_handler(domain, ubwcp_iommu_fault_handler, ubwcp->dev_desc_cb); + if (ubwcp->dev_buf_cb) + ubwcp->state = UBWCP_STATE_READY; + return ret; err: @@ -3189,7 +3257,7 @@ static int ubwcp_remove_cb_buf(struct platform_device *pdev) return -EINVAL; } - /* remove buf_cb reference */ + ubwcp->state = UBWCP_STATE_INVALID; ubwcp->dev_buf_cb = NULL; return 0; } @@ -3216,6 +3284,7 @@ static int ubwcp_remove_cb_desc(struct platform_device *pdev) ubwcp_hw_set_buf_desc(ubwcp->base, 0x0, 0x0); ubwcp_power(ubwcp, false); + ubwcp->state = UBWCP_STATE_INVALID; 
dma_free_coherent(ubwcp->dev_desc_cb, ubwcp->buffer_desc_size, ubwcp->buffer_desc_base, @@ -3248,6 +3317,8 @@ static int qcom_ubwcp_remove(struct platform_device *pdev) ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_DECODE_ERROR, false); ubwcp_power(ubwcp, false); + ubwcp->state = UBWCP_STATE_INVALID; + /* before destroying, make sure pool is empty. otherwise pool_destroy() panics. * TBD: remove this check for production code and let it panic */ From ff5feb8adc46f245ea2b420833af5674df8fbf59 Mon Sep 17 00:00:00 2001 From: Amol Jadi Date: Wed, 19 Apr 2023 17:59:55 -0700 Subject: [PATCH 24/35] ubwcp: cleanup buffer lock tracking Replaced use of locked with lock_count. Change-Id: I5a963850b79ed425cc2eeee5e94cb44104cdccc3 Signed-off-by: Amol Jadi --- ubwcp/ubwcp_main.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/ubwcp/ubwcp_main.c b/ubwcp/ubwcp_main.c index 29e6668180..4ae74aa8b0 100644 --- a/ubwcp/ubwcp_main.c +++ b/ubwcp/ubwcp_main.c @@ -193,7 +193,6 @@ struct ubwcp_buf { bool perm; struct ubwcp_desc *desc; bool buf_attr_set; - bool locked; enum dma_data_direction lock_dir; int lock_count; @@ -1562,7 +1561,7 @@ int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) mutex_lock(&buf->lock); - if (buf->locked) { + if (buf->lock_count) { ERR("Cannot set attr when buffer is locked"); ret = -EBUSY; goto unlock; @@ -1918,7 +1917,7 @@ static int ubwcp_lock(struct dma_buf *dmabuf, enum dma_data_direction dir) goto err; } - if (!buf->locked) { + if (!buf->lock_count) { DBG("first lock on buffer"); /* buf->desc could already be allocated because of perm range xlation */ @@ -1985,7 +1984,6 @@ static int ubwcp_lock(struct dma_buf *dmabuf, enum dma_data_direction dir) dma_sync_single_for_cpu(ubwcp->dev, buf->ula_pa, buf->ula_size, dir); trace_ubwcp_dma_sync_single_for_cpu_end(buf->ula_size); buf->lock_dir = dir; - buf->locked = true; } else { DBG("buf already locked"); /* TBD: what if new buffer direction is not 
same as previous? @@ -2052,7 +2050,6 @@ static int unlock_internal(struct ubwcp_buf *buf, enum dma_data_direction dir, b ubwcp_buf_desc_free(buf->ubwcp, buf->desc); buf->desc = NULL; } - buf->locked = false; return ret; } @@ -2101,13 +2098,13 @@ static int ubwcp_unlock(struct dma_buf *dmabuf, enum dma_data_direction dir) return -EPERM; } - if (!buf->locked) { + mutex_lock(&buf->lock); + if (!buf->lock_count) { ERR("unlock() called on buffer which not in locked state"); trace_ubwcp_unlock_end(dmabuf); + mutex_unlock(&buf->lock); return -1; } - - mutex_lock(&buf->lock); ret = unlock_internal(buf, dir, false); mutex_unlock(&buf->lock); trace_ubwcp_unlock_end(dmabuf); @@ -2204,7 +2201,7 @@ int ubwcp_set_perm_range_translation(struct dma_buf *dmabuf, bool enable) /* if "disable" and we have allocated a desc and it is not being * used currently, release it */ - if (!enable && buf->desc && !buf->locked) { + if (!enable && buf->desc && !buf->lock_count) { ubwcp_buf_desc_free(buf->ubwcp, buf->desc); buf->desc = NULL; @@ -2257,7 +2254,7 @@ static int ubwcp_free_buffer(struct dma_buf *dmabuf) mutex_lock(&buf->lock); is_non_lin_buf = (buf->buf_attr.image_format != UBWCP_LINEAR); - if (buf->locked) { + if (buf->lock_count) { DBG("free() called without unlock. unlock()'ing first..."); ret = unlock_internal(buf, buf->lock_dir, true); if (ret) From cce20f5e49a60f405501484443b52b613d8426fb Mon Sep 17 00:00:00 2001 From: Amol Jadi Date: Thu, 27 Apr 2023 21:11:41 -0700 Subject: [PATCH 25/35] ubwcp: set attribute path cleanup Ensure buffer size is sufficient during re-configuration. Ensure unused ioctl values are set to 0. Fix use of uninitialized value when only plane-1 data is present. Register mmap ula pa only if ubwcp configuration is successful. Clarify failure handling of set attribute call. Cleanup related code. 
Change-Id: Ia6ac54e13aa2be1c4a1dc2d4b4a7715dad3aa142 Signed-off-by: Amol Jadi --- ubwcp/include/kernel/ubwcp.h | 5 ++ ubwcp/ubwcp_main.c | 139 ++++++++++++++++------------------- 2 files changed, 67 insertions(+), 77 deletions(-) diff --git a/ubwcp/include/kernel/ubwcp.h b/ubwcp/include/kernel/ubwcp.h index 23c683f8e5..f91b26d402 100644 --- a/ubwcp/include/kernel/ubwcp.h +++ b/ubwcp/include/kernel/ubwcp.h @@ -31,6 +31,11 @@ int ubwcp_get_hw_version(struct ubwcp_ioctl_hw_version *ver); * ubwcp_lock(). Attributes can be configured multiple times, * but only during unlocked state. * + * Upon error, buffer will be in undefined state. + * There is no guarantee that previously set attributes will be retained. + * Caller could retry set attributes, but must not reuse buffer + * until a successful set attribute call is done. + * * @param dmabuf : ptr to the dma buf * @param attr : buffer attributes to set * diff --git a/ubwcp/ubwcp_main.c b/ubwcp/ubwcp_main.c index 4ae74aa8b0..f4b2de0626 100644 --- a/ubwcp/ubwcp_main.c +++ b/ubwcp/ubwcp_main.c @@ -1174,10 +1174,7 @@ err: return ret; } -/* calculate ULA buffer parms - * TBD: how do we make sure uv_start address (not the offset) - * is aligned per requirement: cache line - */ +/* calculate ULA buffer parms */ static int ubwcp_calc_ula_params(struct ubwcp_driver *ubwcp, struct ubwcp_buffer_attrs *attr, size_t *ula_size, @@ -1211,6 +1208,7 @@ static int ubwcp_calc_ula_params(struct ubwcp_driver *ubwcp, */ missing_plane = missing_plane_from_format(attr->image_format); + DBG_BUF_ATTR("ula params -->"); DBG_BUF_ATTR("ioctl_image_format : %d, std_format: %d", attr->image_format, format); DBG_BUF_ATTR("planes_in_format : %d", planes); DBG_BUF_ATTR("missing_plane : %d", missing_plane); @@ -1246,13 +1244,8 @@ static int ubwcp_calc_ula_params(struct ubwcp_driver *ubwcp, } } - //TBD: cleanup - *ula_size = size; - DBG_BUF_ATTR("Before page align: Total ULA_Size: %d (0x%x) (planes + planar padding)", - *ula_size, *ula_size); *ula_size = 
UBWCP_ALIGN(size, 4096); - DBG_BUF_ATTR("After page align : Total ULA_Size: %d (0x%x) (planes + planar padding)", - *ula_size, *ula_size); + DBG_BUF_ATTR("ULA_Size: %zu (0x%x) (before 4K align: %zu)", *ula_size, *ula_size, size); return 0; } @@ -1274,12 +1267,19 @@ static int ubwcp_calc_ubwcp_buf_params(struct ubwcp_driver *ubwcp, /* convert ioctl image format to standard image format */ format = to_std_format(attr->image_format); missing_plane = missing_plane_from_format(attr->image_format); - planes = planes_in_format(format); //pass in 0 (RGB) should return 1 + planes = planes_in_format(format); + DBG_BUF_ATTR("ubwcp params -->"); DBG_BUF_ATTR("ioctl_image_format : %d, std_format: %d", attr->image_format, format); DBG_BUF_ATTR("planes_in_format : %d", planes); DBG_BUF_ATTR("missing_plane : %d", missing_plane); + *md_p0 = 0; + *pd_p0 = 0; + *md_p1 = 0; + *pd_p1 = 0; + *stride_tp10_b = 0; + if (!missing_plane) { *md_p0 = metadata_buf_sz(ubwcp, format, attr->width, attr->height, 0); *pd_p0 = pixeldata_buf_sz(ubwcp, format, attr->width, attr->height, 0); @@ -1289,23 +1289,17 @@ static int ubwcp_calc_ubwcp_buf_params(struct ubwcp_driver *ubwcp, } } else { if (missing_plane == 1) { - *md_p0 = 0; - *pd_p0 = 0; *md_p1 = metadata_buf_sz(ubwcp, format, attr->width, attr->height, 1); *pd_p1 = pixeldata_buf_sz(ubwcp, format, attr->width, attr->height, 1); } else { *md_p0 = metadata_buf_sz(ubwcp, format, attr->width, attr->height, 0); *pd_p0 = pixeldata_buf_sz(ubwcp, format, attr->width, attr->height, 0); - *md_p1 = 0; - *pd_p1 = 0; } } if (format == TP10) { stride_tp10_p = UBWCP_ALIGN(attr->width, 192); *stride_tp10_b = (stride_tp10_p/3) + stride_tp10_p; - } else { - *stride_tp10_b = 0; } return 0; @@ -1372,18 +1366,27 @@ static void ubwcp_dma_unmap(struct ubwcp_buf *buf) } } +static bool verify_dma_buf_size(struct ubwcp_buf *buf, size_t min_size) +{ + size_t dma_len; + + dma_len = sg_dma_len(buf->sgt->sgl); + if (dma_len < min_size) { + ERR("dma len: %zu is less than min 
ubwcp buffer size: %zu", dma_len, min_size); + return false; + } else + return true; +} /* dma map ubwcp buffer */ static int ubwcp_dma_map(struct ubwcp_buf *buf, struct device *dev, - size_t iova_min_size, dma_addr_t *iova) { int ret = 0; struct dma_buf *dma_buf = buf->dma_buf; struct dma_buf_attachment *attachment; struct sg_table *sgt; - size_t dma_len; /* Map buffer to SMMU and get IOVA */ attachment = dma_buf_attach(dma_buf, dev); @@ -1408,14 +1411,6 @@ static int ubwcp_dma_map(struct ubwcp_buf *buf, goto err_unmap; } - /* ensure that dma_buf is big enough for the new attrs */ - dma_len = sg_dma_len(sgt->sgl); - if (dma_len < iova_min_size) { - ERR("dma len: %d is less than min ubwcp buffer size: %d", - dma_len, iova_min_size); - goto err_unmap; - } - *iova = sg_dma_address(sgt->sgl); buf->attachment = attachment; buf->sgt = sgt; @@ -1500,13 +1495,9 @@ static void print_mmdata_desc(struct ubwcp_hw_meta_metadata *mmdata) /* set buffer attributes: * Failure: - * If a call to ubwcp_set_buf_attrs() fails, any attributes set from a previously - * successful ubwcp_set_buf_attrs() will be also removed. Thus, - * ubwcp_set_buf_attrs() implicitly does "unset previous attributes" and - * then "try to set these new attributes". - * - * The result of a failed call to ubwcp_set_buf_attrs() will leave the buffer - * in a linear mode, NOT with attributes from earlier successful call. + * This call may fail for multiple reasons and it will leave the buffer in an undefined state. + * In some situations it may leave the buffer in linear mapped state, and in other situations it + * may leave the buffer in previously set attributes state. 
*/ int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) { @@ -1570,32 +1561,21 @@ int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) mmdata = &buf->mmdata; is_non_lin_buf = (buf->buf_attr.image_format != UBWCP_LINEAR); - //TBD: now that we have single exit point for all errors, - //we can limit this call to error only? - //also see if this can be part of reset_buf_attrs() - DBG_BUF_ATTR("resetting mmap to linear"); - /* remove any earlier dma buf mmap configuration */ - ret = ubwcp->mmap_config_fptr(buf->dma_buf, true, 0, 0); - if (ret) { - ERR("dma_buf_mmap_config() failed: %d", ret); + if (!ubwcp_buf_attrs_valid(ubwcp, attr)) { + ERR("Invalid buf attrs"); goto unlock; } - if (!ubwcp_buf_attrs_valid(ubwcp, attr)) { - ERR("Invalid buf attrs"); - goto err; + /* note: this also checks if buf is mmap'ed */ + ret = ubwcp->mmap_config_fptr(buf->dma_buf, true, 0, 0); + if (ret) { + ERR("dma_buf_mmap_config(0,0) failed: %d", ret); + goto unlock; } if (attr->image_format == UBWCP_LINEAR) { DBG_BUF_ATTR("Linear format requested"); - - /* linear format request with permanent range xlation doesn't - * make sense. need to define behavior if this happens. - * note: with perm set, desc is allocated to this buffer. - */ - //TBD: UBWCP_ASSERT(!buf->perm); - if (buf->buf_attr_set) reset_buf_attrs(buf); @@ -1615,35 +1595,27 @@ int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) std_image_format = to_std_format(attr->image_format); if (std_image_format == STD_IMAGE_FORMAT_INVALID) { ERR("Unable to map ioctl image format to std image format"); - goto err; + goto unlock; } /* Calculate uncompressed-buffer size. 
*/ - DBG_BUF_ATTR(""); - DBG_BUF_ATTR(""); - DBG_BUF_ATTR("Calculating ula params -->"); ret = ubwcp_calc_ula_params(ubwcp, attr, &ula_size, &ula_y_plane_size, &uv_start_offset); if (ret) { ERR("ubwcp_calc_ula_params() failed: %d", ret); - goto err; + goto unlock; } ret = ubwcp_validate_uv_align(ubwcp, attr, ula_y_plane_size, uv_start_offset); if (ret) { ERR("ubwcp_validate_uv_align() failed: %d", ret); - goto err; + goto unlock; } - DBG_BUF_ATTR(""); - DBG_BUF_ATTR(""); - DBG_BUF_ATTR("Calculating ubwcp params -->"); - ret = ubwcp_calc_ubwcp_buf_params(ubwcp, attr, - &metadata_p0, &pixeldata_p0, - &metadata_p1, &pixeldata_p1, - &stride_tp10_b); + ret = ubwcp_calc_ubwcp_buf_params(ubwcp, attr, &metadata_p0, &pixeldata_p0, &metadata_p1, + &pixeldata_p1, &stride_tp10_b); if (ret) { ERR("ubwcp_calc_buf_params() failed: %d", ret); - goto err; + goto unlock; } iova_min_size = metadata_p0 + pixeldata_p0 + metadata_p1 + pixeldata_p1; @@ -1674,19 +1646,10 @@ int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) DBG_BUF_ATTR("Allocated ULA_PA: 0x%p of size: 0x%zx", ula_pa, ula_size); DBG_BUF_ATTR(""); - /* inform ULA-PA to dma-heap: needed for dma-heap to do CMOs later on */ - DBG_BUF_ATTR("Calling mmap_config(): ULA_PA: 0x%p size: 0x%zx", ula_pa, ula_size); - ret = ubwcp->mmap_config_fptr(buf->dma_buf, false, buf->ula_pa, - buf->ula_size); - if (ret) { - ERR("dma_buf_mmap_config() failed: %d", ret); - goto err; - } - /* dma map only the first time attribute is set */ if (!buf->buf_attr_set) { /* linear -> ubwcp. 
map ubwcp buffer */ - ret = ubwcp_dma_map(buf, ubwcp->dev_buf_cb, iova_min_size, &iova_base); + ret = ubwcp_dma_map(buf, ubwcp->dev_buf_cb, &iova_base); if (ret) { ERR("ubwcp_dma_map() failed: %d", ret); goto err; @@ -1695,6 +1658,9 @@ int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) iova_base, iova_min_size, iova_base + iova_min_size); } + if(!verify_dma_buf_size(buf, iova_min_size)) + goto err; + uv_start = ula_pa + uv_start_offset; if (!IS_ALIGNED(uv_start, 64)) { ERR("ERROR: uv_start is NOT aligned to cache line"); @@ -1751,9 +1717,18 @@ int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) goto err; } + /* inform ULA-PA to dma-heap */ + DBG_BUF_ATTR("Calling mmap_config(): ULA_PA: 0x%p size: 0x%zx", ula_pa, ula_size); + ret = ubwcp->mmap_config_fptr(buf->dma_buf, false, buf->ula_pa, buf->ula_size); + if (ret) { + ERR("dma_buf_mmap_config() failed: %d", ret); + if (!is_non_lin_buf) + dec_num_non_lin_buffers(ubwcp); + goto err; + } + buf->buf_attr = *attr; buf->buf_attr_set = true; - //TBD: UBWCP_ASSERT(!buf->perm); mutex_unlock(&buf->lock); trace_ubwcp_set_buf_attrs_end(dmabuf); return 0; @@ -2325,6 +2300,16 @@ static long ubwcp_ioctl(struct file *file, unsigned int ioctl_num, unsigned long return -EFAULT; } DBG("IOCTL : SET_BUF_ATTR: fd = %d", buf_attr_ioctl.fd); + + if (buf_attr_ioctl.attr.unused1 || buf_attr_ioctl.attr.unused2 + || buf_attr_ioctl.attr.unused3 || buf_attr_ioctl.attr.unused4 + || buf_attr_ioctl.attr.unused5 || buf_attr_ioctl.attr.unused6 + || buf_attr_ioctl.attr.unused7 || buf_attr_ioctl.attr.unused8 + || buf_attr_ioctl.attr.unused9) { + ERR("ERROR: buf attr unused values must be set to 0"); + return -EINVAL; + } + return ubwcp_set_buf_attrs_ioctl(&buf_attr_ioctl); case UBWCP_IOCTL_GET_HW_VER: From 9c554efeb164d027caaded261f890597e0f87cb3 Mon Sep 17 00:00:00 2001 From: Anthony Adamo Date: Mon, 1 May 2023 11:33:57 -0700 Subject: [PATCH 26/35] ubwcp: Add DLKM flag to guard compilation of 
ubwcp.ko qmaa mode exposes dependencies on ubwcp.ko compilation need to guard compilation of ubqcp.ko by dlkm flag Change-Id: I2e171f2ae837b8ca7aa3e1e0d62731d4d3dde750 Signed-off-by: Anthony Adamo --- ubwcp/Android.mk | 2 ++ ubwcp_kernel_product_board.mk | 3 ++- ubwcp_kernel_vendor_board.mk | 3 ++- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/ubwcp/Android.mk b/ubwcp/Android.mk index b0761681f6..547a185cf7 100644 --- a/ubwcp/Android.mk +++ b/ubwcp/Android.mk @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only +ifneq ($(TARGET_KERNEL_DLKM_DISABLE), true) LOCAL_PATH := $(call my-dir) LOCAL_MODULE_DDK_BUILD := true @@ -11,3 +12,4 @@ LOCAL_EXPORT_KO_INCLUDE_DIRS += $(LOCAL_PATH)/include/kernel LOCAL_MODULE := ubwcp.ko LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) include $(DLKM_DIR)/Build_external_kernelmodule.mk +endif diff --git a/ubwcp_kernel_product_board.mk b/ubwcp_kernel_product_board.mk index 1e68affb4d..8086125abe 100644 --- a/ubwcp_kernel_product_board.mk +++ b/ubwcp_kernel_product_board.mk @@ -1,2 +1,3 @@ +ifneq ($(TARGET_KERNEL_DLKM_DISABLE), true) PRODUCT_PACKAGES += ubwcp.ko - +endif diff --git a/ubwcp_kernel_vendor_board.mk b/ubwcp_kernel_vendor_board.mk index 8f8535ebd5..2390268536 100644 --- a/ubwcp_kernel_vendor_board.mk +++ b/ubwcp_kernel_vendor_board.mk @@ -1,2 +1,3 @@ +ifneq ($(TARGET_KERNEL_DLKM_DISABLE), true) BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/ubwcp.ko - +endif From 21aed34bbd20af4f1df65eb238d9759c839338ce Mon Sep 17 00:00:00 2001 From: Amol Jadi Date: Wed, 3 May 2023 13:02:36 -0700 Subject: [PATCH 27/35] ubwcp: free path cleanup Unlock before free. Keep dmabuf ref until complete processing is done during set attr. 
Change-Id: Idf0739bc9571cba91add8299f7b27f9a7c51e819 Signed-off-by: Amol Jadi --- ubwcp/ubwcp_main.c | 41 +++++++++++++++-------------------------- 1 file changed, 15 insertions(+), 26 deletions(-) diff --git a/ubwcp/ubwcp_main.c b/ubwcp/ubwcp_main.c index f4b2de0626..c19e9b9b0f 100644 --- a/ubwcp/ubwcp_main.c +++ b/ubwcp/ubwcp_main.c @@ -344,28 +344,6 @@ static int ubwcp_power(struct ubwcp_driver *ubwcp, bool enable) return ret; } - -/* get dma_buf ptr for the given dma_buf fd */ -static struct dma_buf *ubwcp_dma_buf_fd_to_dma_buf(int dma_buf_fd) -{ - struct dma_buf *dmabuf; - - /* TBD: dma_buf_get() results in taking ref to buf and it won't ever get - * free'ed until ref count goes to 0. So we must reduce the ref count - * immediately after we find our corresponding ubwcp_buf. - */ - dmabuf = dma_buf_get(dma_buf_fd); - if (IS_ERR(dmabuf)) { - ERR("dmabuf ptr not found for dma_buf_fd = %d", dma_buf_fd); - return NULL; - } - - dma_buf_put(dmabuf); - - return dmabuf; -} - - /* get ubwcp_buf corresponding to the given dma_buf */ static struct ubwcp_buf *dma_buf_to_ubwcp_buf(struct dma_buf *dmabuf) { @@ -1755,11 +1733,18 @@ EXPORT_SYMBOL(ubwcp_set_buf_attrs); /* Set buffer attributes ioctl */ static int ubwcp_set_buf_attrs_ioctl(struct ubwcp_ioctl_buffer_attrs *attr_ioctl) { + int ret; struct dma_buf *dmabuf; - dmabuf = ubwcp_dma_buf_fd_to_dma_buf(attr_ioctl->fd); + dmabuf = dma_buf_get(attr_ioctl->fd); + if (IS_ERR(dmabuf)) { + ERR("dmabuf ptr not found for dma_buf_fd = %d", dma_buf_fd); + return PTR_ERR(dmabuf); + } - return ubwcp_set_buf_attrs(dmabuf, &attr_ioctl->attr); + ret = ubwcp_set_buf_attrs(dmabuf, &attr_ioctl->attr); + dma_buf_put(dmabuf); + return ret; } @@ -2230,7 +2215,7 @@ static int ubwcp_free_buffer(struct dma_buf *dmabuf) is_non_lin_buf = (buf->buf_attr.image_format != UBWCP_LINEAR); if (buf->lock_count) { - DBG("free() called without unlock. unlock()'ing first..."); + DBG("free before unlock (lock_count: %d). 
unlock()'ing first", buf->lock_count); ret = unlock_internal(buf, buf->lock_dir, true); if (ret) ERR("unlock_internal(): failed : %d, but continuing free()", ret); @@ -2238,7 +2223,10 @@ static int ubwcp_free_buffer(struct dma_buf *dmabuf) /* if we are still holding a desc, release it. this can happen only if perm == true */ if (buf->desc) { - WARN_ON(!buf->perm); /* TBD: change to BUG() later...*/ + if (!buf->perm) { + ubwcp->state = UBWCP_STATE_FAULT; + WARN_ON(true); + } ubwcp_buf_desc_free(buf->ubwcp, buf->desc); buf->desc = NULL; } @@ -2250,6 +2238,7 @@ static int ubwcp_free_buffer(struct dma_buf *dmabuf) hash_del(&buf->hnode); spin_unlock_irqrestore(&ubwcp->buf_table_lock, flags); + mutex_unlock(&buf->lock); kfree(buf); if (is_non_lin_buf) From 01e22caa564e677cb8ebabfca962f208a00f98b8 Mon Sep 17 00:00:00 2001 From: Amol Jadi Date: Wed, 3 May 2023 18:55:09 -0700 Subject: [PATCH 28/35] ubwcp: misc cleanup Removed unused scm calls. Copy to userspace only on successful hw version call. Cleanup during probe failure. During driver removal, update regs only if power ON was successful. 
Change-Id: I7acc165bdb80b32e377c7c5704fa3863a3819b39 Signed-off-by: Amol Jadi --- ubwcp/ubwcp_hw.c | 27 +---------------- ubwcp/ubwcp_main.c | 75 +++++++++++++++++++++++++--------------------- 2 files changed, 42 insertions(+), 60 deletions(-) diff --git a/ubwcp/ubwcp_hw.c b/ubwcp/ubwcp_hw.c index 8d32213fb6..3b384426b9 100644 --- a/ubwcp/ubwcp_hw.c +++ b/ubwcp/ubwcp_hw.c @@ -64,25 +64,7 @@ MODULE_LICENSE("GPL"); #define UBWCP_DEBUG_REG_RW /* read/write register */ -#if defined(UBWCP_USE_SMC) -#define UBWCP_REG_READ(_base, _offset) \ - ({u32 _reg = 0; int _ret; \ - _ret = qcom_scm_io_readl((phys_addr_t)(_base + _offset), &_reg); \ - if (_ret) \ - DBG("scm_read() failed: %d", _ret); \ - else \ - DBG("scm_read() : %p + 0x%x -> 0x%08x", _base, _offset, _reg); \ - _reg; }) - -#define UBWCP_REG_WRITE(_base, _offset, _value) \ - {int _ret;\ - _ret = qcom_scm_io_writel((phys_addr_t)(_base + _offset), _value); \ - if (_ret) \ - DBG("scm_write() failed: %d", _ret); \ - else \ - DBG("scm_write(): %p + 0x%x <- 0x%08x", _base, _offset, _value); \ - } -#elif defined(UBWCP_DEBUG_REG_RW) +#if defined(UBWCP_DEBUG_REG_RW) #define UBWCP_REG_READ(_base, _offset) \ ({u32 _reg; \ _reg = ioread32(_base + _offset); \ @@ -94,16 +76,9 @@ MODULE_LICENSE("GPL"); DBG("WRITE: 0x%x <- 0x%08x", _offset, _value); \ iowrite32(_value, _base + _offset); \ } -#elif defined(UBWCP_DUMMY_REG_RW) -/* do nothing */ -#define UBWCP_REG_READ(_base, _offset) ((_base + _offset) ? 0x0 : 0x0) -#define UBWCP_REG_WRITE(_base, _offset, _value) ((_base + _offset + _value) ? 
0x0 : 0x0) - #else - #define UBWCP_REG_READ(_base, _offset) ioread32(_base + _offset) #define UBWCP_REG_WRITE(_base, _offset, _value) iowrite32(_value, _base + _offset) - #endif #define UBWCP_REG_READ_NO_DBG(_base, _offset) ioread32(_base + _offset) diff --git a/ubwcp/ubwcp_main.c b/ubwcp/ubwcp_main.c index c19e9b9b0f..67d70003a0 100644 --- a/ubwcp/ubwcp_main.c +++ b/ubwcp/ubwcp_main.c @@ -2303,7 +2303,10 @@ static long ubwcp_ioctl(struct file *file, unsigned int ioctl_num, unsigned long case UBWCP_IOCTL_GET_HW_VER: DBG("IOCTL : GET_HW_VER"); - ubwcp_get_hw_version(&hw_ver); + + if (ubwcp_get_hw_version(&hw_ver)) + return -EINVAL; + if (copy_to_user((void __user *)ioctl_param, &hw_ver, sizeof(hw_ver))) { ERR("ERROR: copy_to_user() failed"); return -EFAULT; @@ -2617,8 +2620,9 @@ static int ubwcp_cdev_init(struct ubwcp_driver *ubwcp) /* create device class (/sys/class/ubwcp_class) */ dev_class = class_create(THIS_MODULE, "ubwcp_class"); if (IS_ERR(dev_class)) { - ERR("class_create() failed"); - return -1; + ret = PTR_ERR(dev_class); + ERR("class_create() failed, ret: %d", ret); + goto err; } /* Create device and register with sysfs @@ -2627,8 +2631,9 @@ static int ubwcp_cdev_init(struct ubwcp_driver *ubwcp) dev_sys = device_create(dev_class, NULL, devt, NULL, UBWCP_DEVICE_NAME); if (IS_ERR(dev_sys)) { - ERR("device_create() failed"); - return -1; + ret = PTR_ERR(dev_sys); + ERR("device_create() failed, ret: %d", ret); + goto err_device_create; } /* register file operations and get cdev */ @@ -2639,14 +2644,22 @@ static int ubwcp_cdev_init(struct ubwcp_driver *ubwcp) */ ret = cdev_add(&ubwcp->cdev, devt, 1); if (ret) { - ERR("cdev_add() failed"); - return -1; + ERR("cdev_add() failed, ret: %d", ret); + goto err_cdev_add; } ubwcp->devt = devt; ubwcp->dev_class = dev_class; ubwcp->dev_sys = dev_sys; return 0; + +err_cdev_add: + device_destroy(dev_class, devt); +err_device_create: + class_destroy(dev_class); +err: + unregister_chrdev_region(devt, UBWCP_NUM_DEVICES); + 
return ret; } static void ubwcp_cdev_deinit(struct ubwcp_driver *ubwcp) @@ -2967,22 +2980,12 @@ static int qcom_ubwcp_probe(struct platform_device *pdev) ret = dma_set_mask_and_coherent(ubwcp->dev, DMA_BIT_MASK(64)); -#ifdef UBWCP_USE_SMC - { - struct resource res; - - of_address_to_resource(ubwcp_dev->of_node, 0, &res); - ubwcp->base = (void __iomem *) res.start; - DBG("Using SMC calls. base: %p", ubwcp->base); - } -#else ubwcp->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ubwcp->base)) { ERR("devm ioremap() failed: %d", PTR_ERR(ubwcp->base)); return PTR_ERR(ubwcp->base); } DBG("ubwcp->base: %p", ubwcp->base); -#endif ret = of_property_read_u64_index(ubwcp_dev->of_node, "ula_range", 0, &ubwcp->ula_pool_base); if (ret) { @@ -3018,7 +3021,7 @@ static int qcom_ubwcp_probe(struct platform_device *pdev) if (IS_ERR_OR_NULL(ubwcp->vdd)) { ret = PTR_ERR(ubwcp->vdd); ERR("devm_regulator_get() failed: %d", ret); - return -1; + return ret; } ret = ubwcp_init_clocks(ubwcp, ubwcp_dev); @@ -3107,13 +3110,17 @@ static int qcom_ubwcp_probe(struct platform_device *pdev) return ret; err_power_off: - ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_READ_ERROR, false); - ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_WRITE_ERROR, false); - ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_ENCODE_ERROR, false); - ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_DECODE_ERROR, false); + if (!ubwcp_power(ubwcp, true)) { + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_READ_ERROR, false); + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_WRITE_ERROR, false); + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_ENCODE_ERROR, false); + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_DECODE_ERROR, false); + ubwcp_power(ubwcp, false); + } err_pool_add: gen_pool_destroy(ubwcp->ula_pool); err_pool_create: + ubwcp_debugfs_deinit(ubwcp); ubwcp_cdev_deinit(ubwcp); return ret; } @@ -3251,9 +3258,10 @@ static int ubwcp_remove_cb_desc(struct platform_device *pdev) return -1; } - 
ubwcp_power(ubwcp, true); - ubwcp_hw_set_buf_desc(ubwcp->base, 0x0, 0x0); - ubwcp_power(ubwcp, false); + if (!ubwcp_power(ubwcp, true)) { + ubwcp_hw_set_buf_desc(ubwcp->base, 0x0, 0x0); + ubwcp_power(ubwcp, false); + } ubwcp->state = UBWCP_STATE_INVALID; dma_free_coherent(ubwcp->dev_desc_cb, @@ -3281,18 +3289,17 @@ static int qcom_ubwcp_remove(struct platform_device *pdev) return -1; } - ubwcp_power(ubwcp, true); - ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_READ_ERROR, false); - ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_WRITE_ERROR, false); - ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_ENCODE_ERROR, false); - ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_DECODE_ERROR, false); - ubwcp_power(ubwcp, false); + if (!ubwcp_power(ubwcp, true)) { + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_READ_ERROR, false); + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_WRITE_ERROR, false); + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_ENCODE_ERROR, false); + ubwcp_hw_interrupt_enable(ubwcp->base, INTERRUPT_DECODE_ERROR, false); + ubwcp_power(ubwcp, false); + } ubwcp->state = UBWCP_STATE_INVALID; - /* before destroying, make sure pool is empty. otherwise pool_destroy() panics. - * TBD: remove this check for production code and let it panic - */ + /* before destroying, make sure pool is empty. otherwise pool_destroy() panics. */ avail = gen_pool_avail(ubwcp->ula_pool); psize = gen_pool_size(ubwcp->ula_pool); if (psize != avail) { From 90d7328ddafdc28cfd890de2fcdf634331a11cb1 Mon Sep 17 00:00:00 2001 From: Amol Jadi Date: Mon, 8 May 2023 19:23:00 -0700 Subject: [PATCH 29/35] ubwcp: fix for width and stride comparison Width passed in to validation logic is in pixels and was compared to stride in bytes. Updated to first convert width to bytes before comparison. 
Change-Id: Idef3d7d4e79109c556f083c9b95629d77f4717fc Signed-off-by: Amol Jadi --- ubwcp/ubwcp_main.c | 60 ++++++++++++++++++++++++---------------------- 1 file changed, 32 insertions(+), 28 deletions(-) diff --git a/ubwcp/ubwcp_main.c b/ubwcp/ubwcp_main.c index 67d70003a0..24526eba17 100644 --- a/ubwcp/ubwcp_main.c +++ b/ubwcp/ubwcp_main.c @@ -717,6 +717,27 @@ static u32 get_compressed_stride(struct ubwcp_driver *ubwcp, return UBWCP_ALIGN(width, macro_tile_width_p)*pixel_bytes/per_pixel; } +static void +ubwcp_pixel_to_bytes(struct ubwcp_driver *ubwcp, + enum ubwcp_std_image_format format, + u32 width_p, u32 height_p, + u32 *width_b, u32 *height_b) +{ + u16 pixel_bytes; + u16 per_pixel; + struct ubwcp_image_format_info f_info; + struct ubwcp_plane_info p_info; + + f_info = ubwcp->format_info[format]; + p_info = f_info.p_info[0]; + + pixel_bytes = p_info.pixel_bytes; + per_pixel = p_info.per_pixel; + + *width_b = (width_p*pixel_bytes)/per_pixel; + *height_b = (height_p*pixel_bytes)/per_pixel; +} + /* check if linear stride conforms to hw limitations * always returns false for linear image */ @@ -724,13 +745,17 @@ static bool stride_is_valid(struct ubwcp_driver *ubwcp, u16 ioctl_img_fmt, u32 width, u32 lin_stride) { u32 compressed_stride; + u32 width_b; + u32 height_b; enum ubwcp_std_image_format format = to_std_format(ioctl_img_fmt); if (format == STD_IMAGE_FORMAT_INVALID) return false; - if ((lin_stride < width) || (lin_stride > 64*1024)) { - ERR("stride is not valid (width <= stride <= 64K): %d", lin_stride); + ubwcp_pixel_to_bytes(ubwcp, format, width, 0, &width_b, &height_b); + + if ((lin_stride < width_b) || (lin_stride > 64*1024)) { + ERR("Invalid stride: %u width: %u width_b: %u", lin_stride, width, width_b); return false; } @@ -1404,27 +1429,6 @@ err: return ret; } -static void -ubwcp_pixel_to_bytes(struct ubwcp_driver *ubwcp, - enum ubwcp_std_image_format format, - u32 width_p, u32 height_p, - u32 *width_b, u32 *height_b) -{ - u16 pixel_bytes; - u16 
per_pixel; - struct ubwcp_image_format_info f_info; - struct ubwcp_plane_info p_info; - - f_info = ubwcp->format_info[format]; - p_info = f_info.p_info[0]; - - pixel_bytes = p_info.pixel_bytes; - per_pixel = p_info.per_pixel; - - *width_b = (width_p*pixel_bytes)/per_pixel; - *height_b = (height_p*pixel_bytes)/per_pixel; -} - static void reset_buf_attrs(struct ubwcp_buf *buf) { struct ubwcp_hw_meta_metadata *mmdata; @@ -2348,14 +2352,14 @@ static long ubwcp_ioctl(struct file *file, unsigned int ioctl_num, unsigned long return -EFAULT; } - format = to_std_format(validate_stride_ioctl.image_format); - if (format == STD_IMAGE_FORMAT_INVALID) { - ERR("ERROR: invalid format: %d", validate_stride_ioctl.image_format); + if (validate_stride_ioctl.unused1 || validate_stride_ioctl.unused2) { + ERR("ERROR: unused values must be set to 0"); return -EINVAL; } - if (validate_stride_ioctl.unused1 || validate_stride_ioctl.unused2) { - ERR("ERROR: unused values must be set to 0"); + format = to_std_format(validate_stride_ioctl.image_format); + if (format == STD_IMAGE_FORMAT_INVALID) { + ERR("ERROR: invalid format: %d", validate_stride_ioctl.image_format); return -EINVAL; } From 10ba8e9e4076e2e39709664545523ed4620395ec Mon Sep 17 00:00:00 2001 From: Amol Jadi Date: Thu, 18 May 2023 15:00:48 -0700 Subject: [PATCH 30/35] ubwcp: dma direction update Always invalidate on lock (even for write). This ensures that partial tile writes will not overwrite buffer with dummy data from previously prefetched lines. On subsequent lock calls, in case of write, update direction to bi_directional. This way buffer will get flushed during unlock. 
Change-Id: If3f1deffefe14af1dbdb2dae0a77b25106d35e5c Signed-off-by: Amol Jadi --- ubwcp/ubwcp_main.c | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/ubwcp/ubwcp_main.c b/ubwcp/ubwcp_main.c index 24526eba17..d1a19b95a7 100644 --- a/ubwcp/ubwcp_main.c +++ b/ubwcp/ubwcp_main.c @@ -193,7 +193,7 @@ struct ubwcp_buf { bool perm; struct ubwcp_desc *desc; bool buf_attr_set; - enum dma_data_direction lock_dir; + enum dma_data_direction dma_dir; int lock_count; /* dma_buf info */ @@ -1941,18 +1941,23 @@ static int ubwcp_lock(struct dma_buf *dmabuf, enum dma_data_direction dir) } /* Flush/invalidate ULA PA from CPU caches - * TBD: if (dir == READ or BIDIRECTION) //NOT for write - * -- Confirm with Chris if this can be skipped for write + * Always invalidate cache, even when writing. + * Upgrade direction to force invalidate. */ + if (dir == DMA_TO_DEVICE) + dir = DMA_BIDIRECTIONAL; trace_ubwcp_dma_sync_single_for_cpu_start(buf->ula_size); dma_sync_single_for_cpu(ubwcp->dev, buf->ula_pa, buf->ula_size, dir); trace_ubwcp_dma_sync_single_for_cpu_end(buf->ula_size); - buf->lock_dir = dir; + buf->dma_dir = dir; } else { DBG("buf already locked"); - /* TBD: what if new buffer direction is not same as previous? - * must update the dir. + /* For write locks, always upgrade direction to bi_directional. + * A previous read lock will now become write lock. + * This will ensure a flush when the last unlock comes in. 
*/ + if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL)) + buf->dma_dir = DMA_BIDIRECTIONAL; } buf->lock_count++; DBG("new lock_count: %d", buf->lock_count); @@ -1985,6 +1990,9 @@ static int unlock_internal(struct ubwcp_buf *buf, enum dma_data_direction dir, b buf->lock_count = 0; DBG("Forced lock_count: %d", buf->lock_count); } else { + /* for write unlocks, remember the direction so we flush on last unlock */ + if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL)) + buf->dma_dir = DMA_BIDIRECTIONAL; buf->lock_count--; DBG("new lock_count: %d", buf->lock_count); if (buf->lock_count) { @@ -1996,9 +2004,8 @@ static int unlock_internal(struct ubwcp_buf *buf, enum dma_data_direction dir, b ubwcp = buf->ubwcp; /* Flush/invalidate ULA PA from CPU caches */ - //TBD: if (dir == WRITE or BIDIRECTION) trace_ubwcp_dma_sync_single_for_device_start(buf->ula_size); - dma_sync_single_for_device(ubwcp->dev, buf->ula_pa, buf->ula_size, dir); + dma_sync_single_for_device(ubwcp->dev, buf->ula_pa, buf->ula_size, buf->dma_dir); trace_ubwcp_dma_sync_single_for_device_end(buf->ula_size); /* disable range check */ @@ -2220,7 +2227,7 @@ static int ubwcp_free_buffer(struct dma_buf *dmabuf) if (buf->lock_count) { DBG("free before unlock (lock_count: %d). unlock()'ing first", buf->lock_count); - ret = unlock_internal(buf, buf->lock_dir, true); + ret = unlock_internal(buf, buf->dma_dir, true); if (ret) ERR("unlock_internal(): failed : %d, but continuing free()", ret); } From 4ea2f22f83b113efc9a3706ba00cfa7de6b7cb04 Mon Sep 17 00:00:00 2001 From: Amol Jadi Date: Fri, 19 May 2023 10:56:42 -0700 Subject: [PATCH 31/35] ubwcp: override reg and other cleanup Removed writing to override reg as default value is the same. Use defines for hardcoded attribute limit values. 
Change-Id: Ief67b048d5f94eeb260476e6c8aba5a5bf346436 Signed-off-by: Amol Jadi --- ubwcp/ubwcp_hw.c | 3 --- ubwcp/ubwcp_main.c | 33 +++++++++++++++++---------------- 2 files changed, 17 insertions(+), 19 deletions(-) diff --git a/ubwcp/ubwcp_hw.c b/ubwcp/ubwcp_hw.c index 3b384426b9..8ef154f145 100644 --- a/ubwcp/ubwcp_hw.c +++ b/ubwcp/ubwcp_hw.c @@ -335,9 +335,6 @@ void ubwcp_hw_one_time_init(void __iomem *base) { u32 reg; - /* hack: set dataless hazard override bit */ - UBWCP_REG_WRITE(base, OVERRIDE, 0x2000); - /* Spare reg config: set bit-9: SCC & bit-1: padding */ reg = UBWCP_REG_READ(base, SPARE); reg |= BIT(9) | BIT(1); diff --git a/ubwcp/ubwcp_main.c b/ubwcp/ubwcp_main.c index d1a19b95a7..6ed17adadf 100644 --- a/ubwcp/ubwcp_main.c +++ b/ubwcp/ubwcp_main.c @@ -68,6 +68,13 @@ MODULE_IMPORT_NS(DMA_BUF); #define UBWCP_SYNC_GRANULE 0x4000000L /* 64 MB */ +/* Max values for attributes */ +#define MAX_ATTR_WIDTH (10*1024) +#define MAX_ATTR_HEIGHT (10*1024) +#define MAX_ATTR_STRIDE (64*1024) +#define MAX_ATTR_PLANAR_PAD 4096 +#define MAX_ATTR_SCANLN_HT_DELTA (32*1024) + enum ula_remove_mem_status { ULA_REMOVE_MEM_SUCCESS = 0, ULA_REMOVE_MEM_ABORTED = 1 @@ -78,8 +85,7 @@ struct ubwcp_desc { void *ptr; }; -/* TBD: confirm size of width/height */ -struct ubwcp_dimension { +struct tile_dimension { u16 width; u16 height; }; @@ -87,8 +93,8 @@ struct ubwcp_dimension { struct ubwcp_plane_info { u16 pixel_bytes; u16 per_pixel; - struct ubwcp_dimension tilesize_p; /* pixels */ - struct ubwcp_dimension macrotilesize_p; /* pixels */ + struct tile_dimension tilesize_p; /* pixels */ + struct tile_dimension macrotilesize_p; /* pixels */ }; struct ubwcp_image_format_info { @@ -445,7 +451,6 @@ static void ula_unmap(struct ubwcp_driver *ubwcp) static void ula_sync_for_cpu(struct device *dev, u64 addr, unsigned long size) { - DBG("Partial sync offset:0x%lx size:0x%lx", addr, size); trace_ubwcp_dma_sync_single_for_cpu_start(size); dma_sync_single_for_cpu(dev, addr, size, 
DMA_BIDIRECTIONAL); trace_ubwcp_dma_sync_single_for_cpu_end(size); @@ -754,7 +759,7 @@ static bool stride_is_valid(struct ubwcp_driver *ubwcp, ubwcp_pixel_to_bytes(ubwcp, format, width, 0, &width_b, &height_b); - if ((lin_stride < width_b) || (lin_stride > 64*1024)) { + if ((lin_stride < width_b) || (lin_stride > MAX_ATTR_STRIDE)) { ERR("Invalid stride: %u width: %u width_b: %u", lin_stride, width, width_b); return false; } @@ -827,14 +832,12 @@ static bool ubwcp_buf_attrs_valid(struct ubwcp_driver *ubwcp, struct ubwcp_buffe goto err; } - //TBD: some upper limit for width? - if (attr->width > 10*1024) { + if (attr->width > MAX_ATTR_WIDTH) { ERR("width is invalid (above upper limit): %d", attr->width); goto err; } - //TBD: some upper limit for height? - if (attr->height > 10*1024) { + if (attr->height > MAX_ATTR_HEIGHT) { ERR("height is invalid (above upper limit): %d", attr->height); goto err; } @@ -846,15 +849,14 @@ static bool ubwcp_buf_attrs_valid(struct ubwcp_driver *ubwcp, struct ubwcp_buffe } if ((attr->scanlines < attr->height) || - (attr->scanlines > attr->height + 32*1024)) { + (attr->scanlines > attr->height + MAX_ATTR_SCANLN_HT_DELTA)) { ERR("scanlines is not valid - height: %d scanlines: %d", attr->height, attr->scanlines); goto err; } - if (attr->planar_padding > 4096) { - ERR("planar_padding is not valid. 
(<= 4096): %d", - attr->planar_padding); + if (attr->planar_padding > MAX_ATTR_PLANAR_PAD) { + ERR("planar_padding is not valid: %d", attr->planar_padding); goto err; } @@ -1316,7 +1318,6 @@ static phys_addr_t ubwcp_ula_alloc(struct ubwcp_driver *ubwcp, size_t size) mutex_lock(&ubwcp->ula_lock); pa = gen_pool_alloc(ubwcp->ula_pool, size); - DBG("addr: %p, size: %zx", pa, size); mutex_unlock(&ubwcp->ula_lock); return pa; } @@ -3059,7 +3060,7 @@ static int qcom_ubwcp_probe(struct platform_device *pdev) ubwcp_debugfs_init(ubwcp); /* create ULA pool */ - ubwcp->ula_pool = gen_pool_create(12, -1); + ubwcp->ula_pool = gen_pool_create(PAGE_SHIFT, -1); if (!ubwcp->ula_pool) { ERR("failed gen_pool_create()"); ret = -1; From 1a1a0adee1066e7b972c35d30f3b6664728e28de Mon Sep 17 00:00:00 2001 From: Amol Jadi Date: Mon, 22 May 2023 12:05:46 -0700 Subject: [PATCH 32/35] ubwcp: refactor code to reduce warn prints and robust error handling Conversion between ioctl/internal/hw image formats is made consistent and eliminated need for warn calls. Ioctl handling is broken down into multiple functions. 
Change-Id: Ic2786466f5dd7d6b2a380e030f8e04ead67dfe8c Signed-off-by: Amol Jadi --- ubwcp/ubwcp_main.c | 411 ++++++++++++++++++++++++++------------------- 1 file changed, 238 insertions(+), 173 deletions(-) diff --git a/ubwcp/ubwcp_main.c b/ubwcp/ubwcp_main.c index 6ed17adadf..e7fe398bd8 100644 --- a/ubwcp/ubwcp_main.c +++ b/ubwcp/ubwcp_main.c @@ -110,7 +110,6 @@ enum ubwcp_std_image_format { TP10 = 4, P016 = 5, INFO_FORMAT_LIST_SIZE, - STD_IMAGE_FORMAT_INVALID = 0xFF }; enum ubwcp_state { @@ -654,34 +653,67 @@ static void dump_attributes(struct ubwcp_buffer_attrs *attr) DBG_BUF_ATTR(""); } -static enum ubwcp_std_image_format to_std_format(u16 ioctl_image_format) +static int to_std_format(u16 ioctl_image_format, enum ubwcp_std_image_format *format) { switch (ioctl_image_format) { case UBWCP_RGBA8888: - return RGBA; + *format = RGBA; + return 0; case UBWCP_NV12: case UBWCP_NV12_Y: case UBWCP_NV12_UV: - return NV12; + *format = NV12; + return 0; case UBWCP_NV124R: case UBWCP_NV124R_Y: case UBWCP_NV124R_UV: - return NV124R; + *format = NV124R; + return 0; case UBWCP_TP10: case UBWCP_TP10_Y: case UBWCP_TP10_UV: - return TP10; + *format = TP10; + return 0; case UBWCP_P010: case UBWCP_P010_Y: case UBWCP_P010_UV: - return P010; + *format = P010; + return 0; case UBWCP_P016: case UBWCP_P016_Y: case UBWCP_P016_UV: - return P016; + *format = P016; + return 0; default: - WARN(1, "Fix this!!!"); - return STD_IMAGE_FORMAT_INVALID; + ERR("Failed to convert ioctl image format to std format: %d", ioctl_image_format); + return -1; + } +} + +static int std_to_hw_img_fmt(enum ubwcp_std_image_format format, u16 *hw_fmt) +{ + switch (format) { + case RGBA: + *hw_fmt = HW_BUFFER_FORMAT_RGBA; + return 0; + case NV12: + *hw_fmt = HW_BUFFER_FORMAT_NV12; + return 0; + case NV124R: + *hw_fmt = HW_BUFFER_FORMAT_NV124R; + return 0; + case P010: + *hw_fmt = HW_BUFFER_FORMAT_P010; + return 0; + case TP10: + *hw_fmt = HW_BUFFER_FORMAT_TP10; + return 0; + case P016: + *hw_fmt = 
HW_BUFFER_FORMAT_P016; + return 0; + default: + ERR("Failed to convert std image format to hw format: %d", format); + return -1; } } @@ -747,15 +779,11 @@ ubwcp_pixel_to_bytes(struct ubwcp_driver *ubwcp, * always returns false for linear image */ static bool stride_is_valid(struct ubwcp_driver *ubwcp, - u16 ioctl_img_fmt, u32 width, u32 lin_stride) + enum ubwcp_std_image_format format, u32 width, u32 lin_stride) { u32 compressed_stride; u32 width_b; u32 height_b; - enum ubwcp_std_image_format format = to_std_format(ioctl_img_fmt); - - if (format == STD_IMAGE_FORMAT_INVALID) - return false; ubwcp_pixel_to_bytes(ubwcp, format, width, 0, &width_b, &height_b); @@ -810,11 +838,27 @@ static bool ioctl_format_is_valid(u16 ioctl_image_format) /* validate buffer attributes */ static bool ubwcp_buf_attrs_valid(struct ubwcp_driver *ubwcp, struct ubwcp_buffer_attrs *attr) { + enum ubwcp_std_image_format format; + + if (attr->unused1 || attr->unused2 || attr->unused3 || attr->unused4 || attr->unused5 || + attr->unused6 || attr->unused7 || attr->unused8 || attr->unused9) { + ERR("buf attr unused values must be set to 0"); + goto err; + } + if (!ioctl_format_is_valid(attr->image_format)) { ERR("invalid image format: %d", attr->image_format); goto err; } + /* rest of the fields are ignored for linear format */ + if (attr->image_format == UBWCP_LINEAR) { + goto valid; + } + + if (to_std_format(attr->image_format, &format)) + goto err; + if (attr->major_ubwc_ver || attr->minor_ubwc_ver) { ERR("major/minor ubwc ver must be 0. 
major: %d minor: %d", attr->major_ubwc_ver, attr->minor_ubwc_ver); @@ -842,11 +886,10 @@ static bool ubwcp_buf_attrs_valid(struct ubwcp_driver *ubwcp, struct ubwcp_buffe goto err; } - if (attr->image_format != UBWCP_LINEAR) - if(!stride_is_valid(ubwcp, attr->image_format, attr->width, attr->stride)) { - ERR("stride is invalid: %d", attr->stride); - goto err; - } + if(!stride_is_valid(ubwcp, format, attr->width, attr->stride)) { + ERR("stride is invalid: %d", attr->stride); + goto err; + } if ((attr->scanlines < attr->height) || (attr->scanlines > attr->height + MAX_ATTR_SCANLN_HT_DELTA)) { @@ -887,6 +930,7 @@ static bool ubwcp_buf_attrs_valid(struct ubwcp_driver *ubwcp, struct ubwcp_buffe goto err; } +valid: dump_attributes(attr); return true; err: @@ -1109,30 +1153,6 @@ static int planes_in_format(enum ubwcp_std_image_format format) return 2; } -static unsigned int ubwcp_get_hw_image_format_value(u16 ioctl_image_format) -{ - enum ubwcp_std_image_format format; - - format = to_std_format(ioctl_image_format); - switch (format) { - case RGBA: - return HW_BUFFER_FORMAT_RGBA; - case NV12: - return HW_BUFFER_FORMAT_NV12; - case NV124R: - return HW_BUFFER_FORMAT_NV124R; - case P010: - return HW_BUFFER_FORMAT_P010; - case TP10: - return HW_BUFFER_FORMAT_TP10; - case P016: - return HW_BUFFER_FORMAT_P016; - default: - WARN(1, "Fix this!!!!!"); - return 0; - } -} - static int ubwcp_validate_uv_align(struct ubwcp_driver *ubwcp, struct ubwcp_buffer_attrs *attr, size_t ula_y_plane_size, @@ -1144,8 +1164,13 @@ static int ubwcp_validate_uv_align(struct ubwcp_driver *ubwcp, int y_tile_height; int planes; + enum ubwcp_std_image_format format; + ret = to_std_format(attr->image_format, &format); + if (ret) + goto err; + /* Only validate UV align if there is both a Y and UV plane */ - planes = planes_in_format(to_std_format(attr->image_format)); + planes = planes_in_format(format); if (planes != 2) return 0; @@ -1160,7 +1185,7 @@ static int ubwcp_validate_uv_align(struct ubwcp_driver 
*ubwcp, /* * Check that UV plane does not overlap with any of the Y plane’s tiles */ - y_tile_height = get_tile_height(ubwcp, to_std_format(attr->image_format), 0); + y_tile_height = get_tile_height(ubwcp, format, 0); y_tile_align_bytes = y_tile_height * attr->stride; ula_y_plane_size_align = ((ula_y_plane_size + y_tile_align_bytes - 1) / y_tile_align_bytes) * y_tile_align_bytes; @@ -1193,15 +1218,16 @@ static int ubwcp_calc_ula_params(struct ubwcp_driver *ubwcp, u32 stride; u32 scanlines; u32 planar_padding; + int ret; + + ret = to_std_format(attr->image_format, &format); + if (ret) + return ret; stride = attr->stride; scanlines = attr->scanlines; planar_padding = attr->planar_padding; - /* convert ioctl image format to standard image format */ - format = to_std_format(attr->image_format); - - /* Number of "expected" planes in "the standard defined" image format */ planes = planes_in_format(format); @@ -1266,11 +1292,14 @@ static int ubwcp_calc_ubwcp_buf_params(struct ubwcp_driver *ubwcp, int missing_plane; enum ubwcp_std_image_format format; size_t stride_tp10_p; + int ret; FENTRY(); - /* convert ioctl image format to standard image format */ - format = to_std_format(attr->image_format); + ret = to_std_format(attr->image_format, &format); + if (ret) + return ret; + missing_plane = missing_plane_from_format(attr->image_format); planes = planes_in_format(format); @@ -1506,32 +1535,41 @@ int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) u32 height_b; enum ubwcp_std_image_format std_image_format; bool is_non_lin_buf; + u16 hw_img_format; FENTRY(); trace_ubwcp_set_buf_attrs_start(dmabuf); if (!dmabuf) { ERR("NULL dmabuf input ptr"); - trace_ubwcp_set_buf_attrs_end(dmabuf); - return -EINVAL; + ret = -EINVAL; + goto err_validation; } if (!attr) { ERR("NULL attr ptr"); - trace_ubwcp_set_buf_attrs_end(dmabuf); - return -EINVAL; + ret = -EINVAL; + goto err_validation; } buf = dma_buf_to_ubwcp_buf(dmabuf); if (!buf) { ERR("No corresponding 
ubwcp_buf for the passed in dma_buf"); - trace_ubwcp_set_buf_attrs_end(dmabuf); - return -EINVAL; + ret = -EINVAL; + goto err_validation; } ubwcp = buf->ubwcp; - if (ubwcp->state != UBWCP_STATE_READY) - return -EPERM; + if (ubwcp->state != UBWCP_STATE_READY) { + ret = EPERM; + goto err_validation; + } + + if (!ubwcp_buf_attrs_valid(ubwcp, attr)) { + ERR("Invalid buf attrs"); + ret = -EINVAL; + goto err_validation; + } mutex_lock(&buf->lock); @@ -1544,11 +1582,6 @@ int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) mmdata = &buf->mmdata; is_non_lin_buf = (buf->buf_attr.image_format != UBWCP_LINEAR); - if (!ubwcp_buf_attrs_valid(ubwcp, attr)) { - ERR("Invalid buf attrs"); - goto unlock; - } - /* note: this also checks if buf is mmap'ed */ ret = ubwcp->mmap_config_fptr(buf->dma_buf, true, 0, 0); if (ret) { @@ -1575,12 +1608,16 @@ int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) return ret; } - std_image_format = to_std_format(attr->image_format); - if (std_image_format == STD_IMAGE_FORMAT_INVALID) { + if (to_std_format(attr->image_format, &std_image_format)) { ERR("Unable to map ioctl image format to std image format"); goto unlock; } + if (std_to_hw_img_fmt(std_image_format, &hw_img_format)) { + ERR("Unable to map std image format to hw image format"); + goto unlock; + } + /* Calculate uncompressed-buffer size. 
*/ ret = ubwcp_calc_ula_params(ubwcp, attr, &ula_size, &ula_y_plane_size, &uv_start_offset); if (ret) { @@ -1665,7 +1702,7 @@ int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) /* create the mmdata descriptor */ memset(mmdata, 0, sizeof(*mmdata)); mmdata->uv_start_addr = CACHE_ADDR(uv_start); - mmdata->format = ubwcp_get_hw_image_format_value(attr->image_format); + mmdata->format = hw_img_format; if (std_image_format != TP10) { mmdata->stride = CACHE_ADDR(stride_b); /* uncompressed stride */ @@ -1727,6 +1764,7 @@ err: } unlock: mutex_unlock(&buf->lock); +err_validation: if (!ret) ret = -1; trace_ubwcp_set_buf_attrs_end(dmabuf); @@ -1734,25 +1772,6 @@ unlock: } EXPORT_SYMBOL(ubwcp_set_buf_attrs); - -/* Set buffer attributes ioctl */ -static int ubwcp_set_buf_attrs_ioctl(struct ubwcp_ioctl_buffer_attrs *attr_ioctl) -{ - int ret; - struct dma_buf *dmabuf; - - dmabuf = dma_buf_get(attr_ioctl->fd); - if (IS_ERR(dmabuf)) { - ERR("dmabuf ptr not found for dma_buf_fd = %d", dma_buf_fd); - return PTR_ERR(dmabuf); - } - - ret = ubwcp_set_buf_attrs(dmabuf, &attr_ioctl->attr); - dma_buf_put(dmabuf); - return ret; -} - - /* Free up the buffer descriptor */ static void ubwcp_buf_desc_free(struct ubwcp_driver *ubwcp, struct ubwcp_desc *desc) { @@ -2274,14 +2293,137 @@ static int ubwcp_close(struct inode *i, struct file *f) return 0; } +static int ioctl_set_buf_attr(struct ubwcp_driver *ubwcp, unsigned long ioctl_param) +{ + int ret; + struct dma_buf *dmabuf; + struct ubwcp_ioctl_buffer_attrs buf_attr_ioctl; + + if (copy_from_user(&buf_attr_ioctl, (const void __user *) ioctl_param, + sizeof(buf_attr_ioctl))) { + ERR("copy_from_user() failed"); + return -EFAULT; + } + DBG("IOCTL: SET_BUF_ATTR: fd = %d", buf_attr_ioctl.fd); + + dmabuf = dma_buf_get(buf_attr_ioctl.fd); + if (IS_ERR(dmabuf)) { + ERR("dmabuf ptr not found for dma_buf_fd = %d", buf_attr_ioctl.fd); + return PTR_ERR(dmabuf); + } + ret = ubwcp_set_buf_attrs(dmabuf, &buf_attr_ioctl.attr); + 
dma_buf_put(dmabuf); + return ret; +} + +static int ioctl_get_hw_ver(struct ubwcp_driver *ubwcp, unsigned long ioctl_param) +{ + struct ubwcp_ioctl_hw_version hw_ver; + + DBG("IOCTL: GET_HW_VER"); + if (ubwcp_get_hw_version(&hw_ver)) + return -EINVAL; + + if (copy_to_user((void __user *)ioctl_param, &hw_ver, sizeof(hw_ver))) { + ERR("copy_to_user() failed"); + return -EFAULT; + } + return 0; +} + +static int ioctl_get_stride_align(struct ubwcp_driver *ubwcp, unsigned long ioctl_param) +{ + struct ubwcp_ioctl_stride_align stride_align_ioctl; + enum ubwcp_std_image_format format; + + DBG("IOCTL: GET_STRIDE_ALIGN"); + if (copy_from_user(&stride_align_ioctl, (const void __user *) ioctl_param, + sizeof(stride_align_ioctl))) { + ERR("copy_from_user() failed"); + return -EFAULT; + } + + if (stride_align_ioctl.unused != 0) { + ERR("unused values must be set to 0"); + return -EINVAL; + } + + if (!ioctl_format_is_valid(stride_align_ioctl.image_format)) { + ERR("invalid image format: %d", stride_align_ioctl.image_format); + return -EINVAL; + } + + if (stride_align_ioctl.image_format == UBWCP_LINEAR) { + ERR("not supported for LINEAR format"); + return -EINVAL; + } + + if (to_std_format(stride_align_ioctl.image_format, &format)) { + ERR("Unable to map ioctl image format to std image format"); + return -EINVAL; + } + + if (get_stride_alignment(format, &stride_align_ioctl.stride_align)) { + ERR("failed for format: %d", format); + return -EFAULT; + } + + if (copy_to_user((void __user *)ioctl_param, &stride_align_ioctl, + sizeof(stride_align_ioctl))) { + ERR("copy_to_user() failed"); + return -EFAULT; + } + + return 0; +} + +static int ioctl_validate_stride(struct ubwcp_driver *ubwcp, unsigned long ioctl_param) +{ + struct ubwcp_ioctl_validate_stride validate_stride_ioctl; + enum ubwcp_std_image_format format; + + DBG("IOCTL: VALIDATE_STRIDE"); + if (copy_from_user(&validate_stride_ioctl, (const void __user *) ioctl_param, + sizeof(validate_stride_ioctl))) { + 
ERR("copy_from_user() failed"); + return -EFAULT; + } + + if (validate_stride_ioctl.unused1 || validate_stride_ioctl.unused2) { + ERR("unused values must be set to 0"); + return -EINVAL; + } + + if (!ioctl_format_is_valid(validate_stride_ioctl.image_format)) { + ERR("not supported for LINEAR format"); + return -EINVAL; + } + + if (validate_stride_ioctl.image_format == UBWCP_LINEAR) { + ERR("not supported for LINEAR format"); + return -EINVAL; + } + + if (to_std_format(validate_stride_ioctl.image_format, &format)) { + ERR("Unable to map ioctl image format to std image format"); + return -EINVAL; + } + + validate_stride_ioctl.valid = stride_is_valid(ubwcp, format, validate_stride_ioctl.width, + validate_stride_ioctl.stride); + + if (copy_to_user((void __user *)ioctl_param, &validate_stride_ioctl, + sizeof(validate_stride_ioctl))) { + ERR("copy_to_user() failed"); + return -EFAULT; + } + + return 0; +} + /* handle IOCTLs */ static long ubwcp_ioctl(struct file *file, unsigned int ioctl_num, unsigned long ioctl_param) { - struct ubwcp_ioctl_buffer_attrs buf_attr_ioctl; - struct ubwcp_ioctl_hw_version hw_ver; - struct ubwcp_ioctl_validate_stride validate_stride_ioctl; - struct ubwcp_ioctl_stride_align stride_align_ioctl; - enum ubwcp_std_image_format format; struct ubwcp_driver *ubwcp; ubwcp = ubwcp_get_driver(); @@ -2295,93 +2437,16 @@ static long ubwcp_ioctl(struct file *file, unsigned int ioctl_num, unsigned long switch (ioctl_num) { case UBWCP_IOCTL_SET_BUF_ATTR: - if (copy_from_user(&buf_attr_ioctl, (const void __user *) ioctl_param, - sizeof(buf_attr_ioctl))) { - ERR("ERROR: copy_from_user() failed"); - return -EFAULT; - } - DBG("IOCTL : SET_BUF_ATTR: fd = %d", buf_attr_ioctl.fd); - - if (buf_attr_ioctl.attr.unused1 || buf_attr_ioctl.attr.unused2 - || buf_attr_ioctl.attr.unused3 || buf_attr_ioctl.attr.unused4 - || buf_attr_ioctl.attr.unused5 || buf_attr_ioctl.attr.unused6 - || buf_attr_ioctl.attr.unused7 || buf_attr_ioctl.attr.unused8 - || 
buf_attr_ioctl.attr.unused9) { - ERR("ERROR: buf attr unused values must be set to 0"); - return -EINVAL; - } - - return ubwcp_set_buf_attrs_ioctl(&buf_attr_ioctl); + return ioctl_set_buf_attr(ubwcp, ioctl_param); case UBWCP_IOCTL_GET_HW_VER: - DBG("IOCTL : GET_HW_VER"); - - if (ubwcp_get_hw_version(&hw_ver)) - return -EINVAL; - - if (copy_to_user((void __user *)ioctl_param, &hw_ver, sizeof(hw_ver))) { - ERR("ERROR: copy_to_user() failed"); - return -EFAULT; - } - break; + return ioctl_get_hw_ver(ubwcp, ioctl_param); case UBWCP_IOCTL_GET_STRIDE_ALIGN: - DBG("IOCTL : GET_STRIDE_ALIGN"); - if (copy_from_user(&stride_align_ioctl, (const void __user *) ioctl_param, - sizeof(stride_align_ioctl))) { - ERR("ERROR: copy_from_user() failed"); - return -EFAULT; - } - - format = to_std_format(stride_align_ioctl.image_format); - if (format == STD_IMAGE_FORMAT_INVALID) - return -EINVAL; - - if (stride_align_ioctl.unused != 0) - return -EINVAL; - - if (get_stride_alignment(format, &stride_align_ioctl.stride_align)) { - ERR("ERROR: copy_to_user() failed"); - return -EFAULT; - } - - if (copy_to_user((void __user *)ioctl_param, &stride_align_ioctl, - sizeof(stride_align_ioctl))) { - ERR("ERROR: copy_to_user() failed"); - return -EFAULT; - } - break; + return ioctl_get_stride_align(ubwcp, ioctl_param); case UBWCP_IOCTL_VALIDATE_STRIDE: - DBG("IOCTL : VALIDATE_STRIDE"); - if (copy_from_user(&validate_stride_ioctl, (const void __user *) ioctl_param, - sizeof(validate_stride_ioctl))) { - ERR("ERROR: copy_from_user() failed"); - return -EFAULT; - } - - if (validate_stride_ioctl.unused1 || validate_stride_ioctl.unused2) { - ERR("ERROR: unused values must be set to 0"); - return -EINVAL; - } - - format = to_std_format(validate_stride_ioctl.image_format); - if (format == STD_IMAGE_FORMAT_INVALID) { - ERR("ERROR: invalid format: %d", validate_stride_ioctl.image_format); - return -EINVAL; - } - - validate_stride_ioctl.valid = stride_is_valid(ubwcp, - validate_stride_ioctl.image_format, - 
validate_stride_ioctl.width, - validate_stride_ioctl.stride); - - if (copy_to_user((void __user *)ioctl_param, &validate_stride_ioctl, - sizeof(validate_stride_ioctl))) { - ERR("ERROR: copy_to_user() failed"); - return -EFAULT; - } - break; + return ioctl_validate_stride(ubwcp, ioctl_param); default: ERR("Invalid ioctl_num = %d", ioctl_num); From ec0aef521118ea4582c8a79f017c8b5d01e26d48 Mon Sep 17 00:00:00 2001 From: Amol Jadi Date: Thu, 25 May 2023 14:01:34 -0700 Subject: [PATCH 33/35] ubwcp: do not ignore buf size calculation failure Failure to look up plane information during size calculation was returning size of 0. Instead return error and let the caller handle error. This removes need for warn calls and communicates error up the caller. Change-Id: Ie147e20a64077fe586c7cd29ce91fe9dfe22a96c Signed-off-by: Amol Jadi --- ubwcp/ubwcp_main.c | 94 ++++++++++++++++++---------------------------- 1 file changed, 37 insertions(+), 57 deletions(-) diff --git a/ubwcp/ubwcp_main.c b/ubwcp/ubwcp_main.c index e7fe398bd8..bd2603986c 100644 --- a/ubwcp/ubwcp_main.c +++ b/ubwcp/ubwcp_main.c @@ -530,6 +530,7 @@ err: atomic_dec(&ubwcp->num_non_lin_buffers); mutex_unlock(&ubwcp->mem_hotplug_lock); ubwcp->state = UBWCP_STATE_FAULT; + ERR("state set to fault"); return -1; } @@ -568,6 +569,7 @@ err: atomic_inc(&ubwcp->num_non_lin_buffers); mutex_unlock(&ubwcp->mem_hotplug_lock); ubwcp->state = UBWCP_STATE_FAULT; + ERR("state set to fault"); return -1; } @@ -940,17 +942,11 @@ err: /* calculate and return metadata buffer size for a given plane * and buffer attributes - * NOTE: in this function, we will only pass in NV12 format. - * NOT NV12_Y or NV12_UV etc. - * the Y or UV information is in the "plane" - * "format" here purely means "encoding format" and no information - * if some plane data is missing. 
*/ -static size_t metadata_buf_sz(struct ubwcp_driver *ubwcp, +static int metadata_buf_sz(struct ubwcp_driver *ubwcp, enum ubwcp_std_image_format format, - u32 width, u32 height, u8 plane) + u32 width, u32 height, u8 plane, size_t *size) { - size_t size; u64 pitch; u64 lines; u64 tile_width; @@ -966,10 +962,8 @@ static size_t metadata_buf_sz(struct ubwcp_driver *ubwcp, DBG_BUF_ATTR("Calculating metadata buffer size: format = %d, plane = %d", format, plane); if (plane >= f_info.planes) { - ERR("Format does not have requested plane info: format: %d, plane: %d", - format, plane); - WARN(1, "Fix this!!!!!"); - return 0; + ERR("Missing plane info: format: %d, plane: %d", format, plane); + return -1; } p_info = f_info.p_info[plane]; @@ -996,21 +990,20 @@ static size_t metadata_buf_sz(struct ubwcp_driver *ubwcp, DBG_BUF_ATTR("size (p*l*bytes) : %d", pitch*lines*1); /* x1 below is only to clarify that we are multiplying by 1 bytes/tile */ - size = UBWCP_ALIGN(pitch*lines*1, META_DATA_SIZE_ALIGN); + *size = UBWCP_ALIGN(pitch*lines*1, META_DATA_SIZE_ALIGN); - DBG_BUF_ATTR("size (aligned 4K): %zu (0x%zx)", size, size); - return size; + DBG_BUF_ATTR("size (aligned 4K): %zu (0x%zx)", *size, *size); + return 0; } /* calculate and return size of pixel data buffer for a given plane * and buffer attributes */ -static size_t pixeldata_buf_sz(struct ubwcp_driver *ubwcp, +static int pixeldata_buf_sz(struct ubwcp_driver *ubwcp, u16 format, u32 width, - u32 height, u8 plane) + u32 height, u8 plane, size_t *size) { - size_t size; u64 pitch; u64 lines; u16 pixel_bytes; @@ -1028,10 +1021,8 @@ static size_t pixeldata_buf_sz(struct ubwcp_driver *ubwcp, DBG_BUF_ATTR("Calculating Pixeldata buffer size: format = %d, plane = %d", format, plane); if (plane >= f_info.planes) { - ERR("Format does not have requested plane info: format: %d, plane: %d", - format, plane); - WARN(1, "Fix this!!!!!"); - return 0; + ERR("Missing plane info: format: %d, plane: %d", format, plane); + return -1; } p_info = 
f_info.p_info[plane]; @@ -1060,11 +1051,9 @@ static size_t pixeldata_buf_sz(struct ubwcp_driver *ubwcp, DBG_BUF_ATTR("lines : %d", lines); DBG_BUF_ATTR("size (p*l*bytes) : %d", (pitch*lines*pixel_bytes)/per_pixel); - size = UBWCP_ALIGN((pitch*lines*pixel_bytes)/per_pixel, PIXEL_DATA_SIZE_ALIGN); - - DBG_BUF_ATTR("size (aligned 4K): %zu (0x%zx)", size, size); - - return size; + *size = UBWCP_ALIGN((pitch*lines*pixel_bytes)/per_pixel, PIXEL_DATA_SIZE_ALIGN); + DBG_BUF_ATTR("size (aligned 4K): %zu (0x%zx)", *size, *size); + return 0; } static int get_tile_height(struct ubwcp_driver *ubwcp, enum ubwcp_std_image_format format, @@ -1230,13 +1219,6 @@ static int ubwcp_calc_ula_params(struct ubwcp_driver *ubwcp, /* Number of "expected" planes in "the standard defined" image format */ planes = planes_in_format(format); - - /* any plane missing? - * valid missing_plane values: - * 0 == no plane missing - * 1 == 1st plane missing - * 2 == 2nd plane missing - */ missing_plane = missing_plane_from_format(attr->image_format); DBG_BUF_ATTR("ula params -->"); @@ -1314,21 +1296,18 @@ static int ubwcp_calc_ubwcp_buf_params(struct ubwcp_driver *ubwcp, *pd_p1 = 0; *stride_tp10_b = 0; - if (!missing_plane) { - *md_p0 = metadata_buf_sz(ubwcp, format, attr->width, attr->height, 0); - *pd_p0 = pixeldata_buf_sz(ubwcp, format, attr->width, attr->height, 0); - if (planes == 2) { - *md_p1 = metadata_buf_sz(ubwcp, format, attr->width, attr->height, 1); - *pd_p1 = pixeldata_buf_sz(ubwcp, format, attr->width, attr->height, 1); - } - } else { - if (missing_plane == 1) { - *md_p1 = metadata_buf_sz(ubwcp, format, attr->width, attr->height, 1); - *pd_p1 = pixeldata_buf_sz(ubwcp, format, attr->width, attr->height, 1); - } else { - *md_p0 = metadata_buf_sz(ubwcp, format, attr->width, attr->height, 0); - *pd_p0 = pixeldata_buf_sz(ubwcp, format, attr->width, attr->height, 0); - } + if (missing_plane != 1) { + if (metadata_buf_sz(ubwcp, format, attr->width, attr->height, 0, md_p0)) + return -1; + if 
(pixeldata_buf_sz(ubwcp, format, attr->width, attr->height, 0, pd_p0)) + return -1; + } + + if ((planes == 2) && (missing_plane != 2)){ + if (metadata_buf_sz(ubwcp, format, attr->width, attr->height, 1, md_p1)) + return -1; + if (pixeldata_buf_sz(ubwcp, format, attr->width, attr->height, 1, pd_p1)) + return -1; } if (format == TP10) { @@ -1828,6 +1807,8 @@ static int range_check_disable(struct ubwcp_driver *ubwcp, int idx) trace_ubwcp_hw_flush_start(0); ret = ubwcp_hw_disable_range_check_with_flush(ubwcp->base, idx); trace_ubwcp_hw_flush_end(0); + if (ret) + ERR("disable_range_check_with_flush() failed: %d", ret); mutex_unlock(&ubwcp->hw_range_ck_lock); mutex_unlock(&ubwcp->ubwcp_flush_lock); return ret; @@ -1956,7 +1937,7 @@ static int ubwcp_lock(struct dma_buf *dmabuf, enum dma_data_direction dir) ret = ubwcp_flush(ubwcp); if (ret) { ubwcp->state = UBWCP_STATE_FAULT; - ERR("ubwcp_flush() failed: %d, driver state set to FAULT", ret); + ERR("state set to fault"); goto err_flush_failed; } @@ -2033,7 +2014,7 @@ static int unlock_internal(struct ubwcp_buf *buf, enum dma_data_direction dir, b ret = range_check_disable(ubwcp, buf->desc->idx); if (ret) { ubwcp->state = UBWCP_STATE_FAULT; - ERR("disable_range_check_with_flush() failed: %d, driver state set to FAULT", ret); + ERR("state set to fault"); } /* release descriptor if perm range xlation is not set */ @@ -2256,7 +2237,7 @@ static int ubwcp_free_buffer(struct dma_buf *dmabuf) if (buf->desc) { if (!buf->perm) { ubwcp->state = UBWCP_STATE_FAULT; - WARN_ON(true); + ERR("state set to fault"); } ubwcp_buf_desc_free(buf->ubwcp, buf->desc); buf->desc = NULL; @@ -2489,6 +2470,7 @@ static int read_err_w_op(void *data, u64 value) return 0; err: ubwcp->state = UBWCP_STATE_FAULT; + ERR("state set to fault"); return -1; } @@ -2522,6 +2504,7 @@ static int write_err_w_op(void *data, u64 value) return 0; err: ubwcp->state = UBWCP_STATE_FAULT; + ERR("state set to fault"); return -1; } @@ -2555,6 +2538,7 @@ static int 
decode_err_w_op(void *data, u64 value) return 0; err: ubwcp->state = UBWCP_STATE_FAULT; + ERR("state set to fault"); return -1; } @@ -2588,6 +2572,7 @@ static int encode_err_w_op(void *data, u64 value) return 0; err: ubwcp->state = UBWCP_STATE_FAULT; + ERR("state set to fault"); return -1; } @@ -3382,7 +3367,6 @@ static int qcom_ubwcp_remove(struct platform_device *pdev) if (psize != avail) { ERR("gen_pool is not empty! avail: %zx size: %zx", avail, psize); ERR("skipping pool destroy....cause it will PANIC. Fix this!!!!"); - WARN(1, "Fix this!"); } else { gen_pool_destroy(ubwcp->ula_pool); } @@ -3410,8 +3394,6 @@ static int ubwcp_probe(struct platform_device *pdev) of_property_read_string(pdev->dev.of_node, "compatible", &compatible); ERR("unknown device: %s", compatible); - - WARN_ON(1); return -EINVAL; } @@ -3436,8 +3418,6 @@ static int ubwcp_remove(struct platform_device *pdev) of_property_read_string(pdev->dev.of_node, "compatible", &compatible); ERR("unknown device: %s", compatible); - - WARN_ON(1); return -EINVAL; } From 1b191f12dcdf9edf0f0eaf9aad0224400e56177e Mon Sep 17 00:00:00 2001 From: Amol Jadi Date: Thu, 25 May 2023 14:40:32 -0700 Subject: [PATCH 34/35] ubwcp: incorrect return of ubwcp buf when dmabuf match is not found When dmabuf does not exist in our record, we were matching it to the last ubwcp buf instead of returning NULL. Also fixed incorrect state check for hw ver call. 
Change-Id: If899bab884a049dcbb8a9acd45a706d10e6d77d4 Signed-off-by: Amol Jadi --- ubwcp/ubwcp_main.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/ubwcp/ubwcp_main.c b/ubwcp/ubwcp_main.c index bd2603986c..0ba1775977 100644 --- a/ubwcp/ubwcp_main.c +++ b/ubwcp/ubwcp_main.c @@ -353,6 +353,7 @@ static int ubwcp_power(struct ubwcp_driver *ubwcp, bool enable) static struct ubwcp_buf *dma_buf_to_ubwcp_buf(struct dma_buf *dmabuf) { struct ubwcp_buf *buf = NULL; + struct ubwcp_buf *ret_buf = NULL; struct ubwcp_driver *ubwcp = ubwcp_get_driver(); unsigned long flags; @@ -362,12 +363,14 @@ static struct ubwcp_buf *dma_buf_to_ubwcp_buf(struct dma_buf *dmabuf) spin_lock_irqsave(&ubwcp->buf_table_lock, flags); /* look up ubwcp_buf corresponding to this dma_buf */ hash_for_each_possible(ubwcp->buf_table, buf, hnode, (u64)dmabuf) { - if (buf->dma_buf == dmabuf) + if (buf->dma_buf == dmabuf) { + ret_buf = buf; break; + } } spin_unlock_irqrestore(&ubwcp->buf_table_lock, flags); - return buf; + return ret_buf; } @@ -387,7 +390,7 @@ int ubwcp_get_hw_version(struct ubwcp_ioctl_hw_version *ver) if (!ubwcp) return -1; - if (ubwcp->state != UBWCP_STATE_FAULT) + if (ubwcp->state == UBWCP_STATE_INVALID) return -EPERM; ver->major = ubwcp->hw_ver_major; From 9ee78c53aecfe72913f5644c138e92504e181e3d Mon Sep 17 00:00:00 2001 From: Amol Jadi Date: Fri, 26 May 2023 12:38:14 -0700 Subject: [PATCH 35/35] ubwcp: rate limit error prints Rate limit error messages to avoid flooding logs and triggering the watchdog. 
Change-Id: I8584ec441586d7516b3049f12d353b0d674d4453 Signed-off-by: Amol Jadi --- ubwcp/ubwcp_hw.c | 10 +++++---- ubwcp/ubwcp_main.c | 56 ++++++++++------------------------------------ 2 files changed, 18 insertions(+), 48 deletions(-) diff --git a/ubwcp/ubwcp_hw.c b/ubwcp/ubwcp_hw.c index 8ef154f145..2d0becf2ed 100644 --- a/ubwcp/ubwcp_hw.c +++ b/ubwcp/ubwcp_hw.c @@ -1,8 +1,10 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. */ +#define pr_fmt(fmt) "%s: hw: %s(): " fmt, KBUILD_MODNAME, __func__ + #include #include #include @@ -17,10 +19,10 @@ static bool ubwcp_hw_trace_en; //#define DBG(fmt, args...) #define DBG(fmt, args...) \ do { \ - if (ubwcp_hw_trace_en) \ - pr_err("ubwcp: hw: %s(): " fmt "\n", __func__, ##args); \ + if (unlikely(ubwcp_hw_trace_en)) \ + pr_err(fmt "\n", ##args); \ } while (0) -#define ERR(fmt, args...) pr_err("ubwcp: hw: %s(): ~~~ERROR~~~: " fmt "\n", __func__, ##args); +#define ERR(fmt, args...) pr_err_ratelimited(": %d: ~~~ERROR~~~: " fmt "\n", __LINE__, ##args) MODULE_LICENSE("GPL"); diff --git a/ubwcp/ubwcp_main.c b/ubwcp/ubwcp_main.c index 0ba1775977..70e694e398 100644 --- a/ubwcp/ubwcp_main.c +++ b/ubwcp/ubwcp_main.c @@ -3,6 +3,8 @@ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. */ +#define pr_fmt(fmt) "%s: %s(): " fmt, KBUILD_MODNAME, __func__ + #include #include #include @@ -49,17 +51,13 @@ MODULE_IMPORT_NS(DMA_BUF); #define UBWCP_ALIGN(_x, _y) ((((_x) + (_y) - 1)/(_y))*(_y)) -#define DBG_BUF_ATTR(fmt, args...) do { if (ubwcp_debug_trace_enable) \ - pr_err("ubwcp: %s(): " fmt "\n", __func__, ##args); \ +#define DBG_BUF_ATTR(fmt, args...) do { if (unlikely(ubwcp_debug_trace_enable)) \ + pr_err(fmt "\n", ##args); \ } while (0) -#define DBG(fmt, args...) 
do { if (ubwcp_debug_trace_enable) \ - pr_err("ubwcp: %s(): " fmt "\n", __func__, ##args); \ +#define DBG(fmt, args...) do { if (unlikely(ubwcp_debug_trace_enable)) \ + pr_err(fmt "\n", ##args); \ } while (0) -#define ERR(fmt, args...) pr_err("ubwcp: %d: %s(): ~~~ERROR~~~: " fmt "\n", __LINE__, __func__, ##args) -#define ERR_RATE_LIMIT(fmt, args...) pr_err_ratelimited("ubwcp: %s(): ~~~ERROR~~~: " fmt "\n",\ - __func__, ##args) - -#define FENTRY() DBG("") +#define ERR(fmt, args...) pr_err_ratelimited("%d: ~~~ERROR~~~: " fmt "\n", __LINE__, ##args) #define META_DATA_PITCH_ALIGN 64 #define META_DATA_HEIGHT_ALIGN 16 @@ -379,8 +377,6 @@ int ubwcp_get_hw_version(struct ubwcp_ioctl_hw_version *ver) { struct ubwcp_driver *ubwcp; - FENTRY(); - if (!ver) { ERR("invalid version ptr"); return -EINVAL; @@ -592,7 +588,6 @@ static int ubwcp_init_buffer(struct dma_buf *dmabuf) struct ubwcp_driver *ubwcp = ubwcp_get_driver(); unsigned long flags; - FENTRY(); trace_ubwcp_init_buffer_start(dmabuf); if (!ubwcp) { @@ -1279,8 +1274,6 @@ static int ubwcp_calc_ubwcp_buf_params(struct ubwcp_driver *ubwcp, size_t stride_tp10_p; int ret; - FENTRY(); - ret = to_std_format(attr->image_format, &format); if (ret) return ret; @@ -1371,7 +1364,6 @@ static phys_addr_t ubwcp_ula_realloc(struct ubwcp_driver *ubwcp, /* unmap dma buf */ static void ubwcp_dma_unmap(struct ubwcp_buf *buf) { - FENTRY(); if (buf->dma_buf && buf->attachment) { DBG("Calling dma_buf_unmap_attachment()"); dma_buf_unmap_attachment(buf->attachment, buf->sgt, DMA_BIDIRECTIONAL); @@ -1519,7 +1511,6 @@ int ubwcp_set_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) bool is_non_lin_buf; u16 hw_img_format; - FENTRY(); trace_ubwcp_set_buf_attrs_start(dmabuf); if (!dmabuf) { @@ -1844,7 +1835,6 @@ static int ubwcp_lock(struct dma_buf *dmabuf, enum dma_data_direction dir) struct ubwcp_buf *buf; struct ubwcp_driver *ubwcp; - FENTRY(); trace_ubwcp_lock_start(dmabuf); if (!dmabuf) { @@ -2046,7 +2036,6 @@ static int 
ubwcp_unlock(struct dma_buf *dmabuf, enum dma_data_direction dir) struct ubwcp_buf *buf; int ret; - FENTRY(); trace_ubwcp_unlock_start(dmabuf); if (!dmabuf) { ERR("NULL dmabuf input ptr"); @@ -2093,8 +2082,6 @@ int ubwcp_get_buf_attrs(struct dma_buf *dmabuf, struct ubwcp_buffer_attrs *attr) int ret = 0; struct ubwcp_buf *buf; - FENTRY(); - if (!dmabuf) { ERR("NULL dmabuf input ptr"); return -EINVAL; @@ -2144,8 +2131,6 @@ int ubwcp_set_perm_range_translation(struct dma_buf *dmabuf, bool enable) int ret = 0; struct ubwcp_buf *buf; - FENTRY(); - if (!dmabuf) { ERR("NULL dmabuf input ptr"); return -EINVAL; @@ -2203,7 +2188,6 @@ static int ubwcp_free_buffer(struct dma_buf *dmabuf) unsigned long flags; bool is_non_lin_buf; - FENTRY(); trace_ubwcp_free_buffer_start(dmabuf); if (!dmabuf) { @@ -2901,7 +2885,7 @@ int ubwcp_iommu_fault_handler(struct iommu_domain *domain, struct device *dev, err.smmu_err.dmabuf = get_dma_buf_from_iova(iova); err.smmu_err.iova = iova; err.smmu_err.iommu_fault_flags = flags; - ERR_RATE_LIMIT("ubwcp_err: err code: %d (smmu), iommu_dev_id: %d, iova: 0x%llx, flags: 0x%x", + ERR("ubwcp_err: err code: %d (smmu), iommu_dev_id: %d, iova: 0x%llx, flags: 0x%x", err.err_code, err.smmu_err.iommu_dev_id, err.smmu_err.iova, err.smmu_err.iommu_fault_flags); ubwcp_notify_error_handlers(&err); @@ -2926,7 +2910,7 @@ static irqreturn_t ubwcp_irq_handler(int irq, void *ptr) err.translation_err.dmabuf = get_dma_buf_from_ulapa(addr); err.translation_err.ula_pa = addr; err.translation_err.read = true; - ERR_RATE_LIMIT("ubwcp_err: err code: %d (range), dmabuf: 0x%llx, read: %d, addr: 0x%llx", + ERR("err_code: %d (range read), dmabuf: 0x%llx, read: %d, addr: 0x%llx", err.err_code, err.translation_err.dmabuf, err.translation_err.read, addr); ubwcp_notify_error_handlers(&err); ubwcp_hw_interrupt_clear(ubwcp->base, 0); @@ -2937,7 +2921,7 @@ static irqreturn_t ubwcp_irq_handler(int irq, void *ptr) err.translation_err.dmabuf = get_dma_buf_from_ulapa(addr); 
err.translation_err.ula_pa = addr; err.translation_err.read = false; - ERR_RATE_LIMIT("ubwcp_err: err code: %d (range), dmabuf: 0x%llx, read: %d, addr: 0x%llx", + ERR("err_code: %d (range write), dmabuf: 0x%llx, read: %d, addr: 0x%llx", err.err_code, err.translation_err.dmabuf, err.translation_err.read, addr); ubwcp_notify_error_handlers(&err); ubwcp_hw_interrupt_clear(ubwcp->base, 1); @@ -2946,7 +2930,7 @@ static irqreturn_t ubwcp_irq_handler(int irq, void *ptr) err.err_code = UBWCP_ENCODE_ERROR; err.enc_err.dmabuf = get_dma_buf_from_ulapa(addr); err.enc_err.ula_pa = addr; - ERR_RATE_LIMIT("ubwcp_err: err code: %d (encode), dmabuf: 0x%llx, addr: 0x%llx", + ERR("err_code: %d (encode), dmabuf: 0x%llx, addr: 0x%llx", err.err_code, err.enc_err.dmabuf, addr); ubwcp_notify_error_handlers(&err); ubwcp_hw_interrupt_clear(ubwcp->base, 3); @@ -2955,7 +2939,7 @@ static irqreturn_t ubwcp_irq_handler(int irq, void *ptr) err.err_code = UBWCP_DECODE_ERROR; err.dec_err.dmabuf = get_dma_buf_from_ulapa(addr); err.dec_err.ula_pa = addr; - ERR_RATE_LIMIT("ubwcp_err: err code: %d (decode), dmabuf: 0x%llx, addr: 0x%llx", + ERR("err_code: %d (decode), dmabuf: 0x%llx, addr: 0x%llx", err.err_code, err.enc_err.dmabuf, addr); ubwcp_notify_error_handlers(&err); ubwcp_hw_interrupt_clear(ubwcp->base, 2); @@ -2972,8 +2956,6 @@ static int ubwcp_interrupt_register(struct platform_device *pdev, struct ubwcp_d int ret = 0; struct device *dev = &pdev->dev; - FENTRY(); - ubwcp->irq_range_ck_rd = platform_get_irq(pdev, 0); if (ubwcp->irq_range_ck_rd < 0) return ubwcp->irq_range_ck_rd; @@ -3033,8 +3015,6 @@ static int qcom_ubwcp_probe(struct platform_device *pdev) struct ubwcp_driver *ubwcp; struct device *ubwcp_dev = &pdev->dev; - FENTRY(); - ubwcp = devm_kzalloc(ubwcp_dev, sizeof(*ubwcp), GFP_KERNEL); if (!ubwcp) { ERR("devm_kzalloc() failed"); @@ -3197,8 +3177,6 @@ static int ubwcp_probe_cb_buf(struct platform_device *pdev) struct ubwcp_driver *ubwcp; struct iommu_domain *domain = NULL; - FENTRY(); 
- ubwcp = dev_get_drvdata(pdev->dev.parent); if (!ubwcp) { ERR("failed to get ubwcp ptr"); @@ -3223,8 +3201,6 @@ static int ubwcp_probe_cb_desc(struct platform_device *pdev) struct ubwcp_driver *ubwcp; struct iommu_domain *domain = NULL; - FENTRY(); - ubwcp = dev_get_drvdata(pdev->dev.parent); if (!ubwcp) { ERR("failed to get ubwcp ptr"); @@ -3292,8 +3268,6 @@ static int ubwcp_remove_cb_buf(struct platform_device *pdev) { struct ubwcp_driver *ubwcp; - FENTRY(); - ubwcp = dev_get_drvdata(pdev->dev.parent); if (!ubwcp) { ERR("failed to get ubwcp ptr"); @@ -3310,8 +3284,6 @@ static int ubwcp_remove_cb_desc(struct platform_device *pdev) { struct ubwcp_driver *ubwcp; - FENTRY(); - ubwcp = dev_get_drvdata(pdev->dev.parent); if (!ubwcp) { ERR("failed to get ubwcp ptr"); @@ -3345,8 +3317,6 @@ static int qcom_ubwcp_remove(struct platform_device *pdev) size_t psize; struct ubwcp_driver *ubwcp; - FENTRY(); - /* get pdev->dev->driver_data = ubwcp */ ubwcp = platform_get_drvdata(pdev); if (!ubwcp) { @@ -3385,7 +3355,6 @@ static int ubwcp_probe(struct platform_device *pdev) { const char *compatible = ""; - FENTRY(); trace_ubwcp_probe(pdev); if (of_device_is_compatible(pdev->dev.of_node, "qcom,ubwcp")) @@ -3405,7 +3374,6 @@ static int ubwcp_remove(struct platform_device *pdev) { const char *compatible = ""; - FENTRY(); trace_ubwcp_remove(pdev); /* TBD: what if buffers are still allocated? locked? etc.